// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bluetooth Software UART Qualcomm protocol
 *
 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 * protocol extension to H4.
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
 *
 * Acknowledgements:
 * This file is based on hci_ll.c, which was written by Ohad Ben-Cohen
 * <ohad@bencohen.org>, and which was in turn based on hci_h4.c, written
 * by Maxim Krasnyansky and Marcel Holtmann.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"

/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND 0xFE
#define HCI_IBS_WAKE_IND 0xFD
#define HCI_IBS_WAKE_ACK 0xFC
#define HCI_MAX_IBS_SIZE 10
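/* An HCI_IBS message is a single byte: it travels in the H4 packet-type
 * position on the wire with no payload following it (see
 * send_hci_ibs_cmd() below).
 */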

#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
#define MEMDUMP_TIMEOUT_MS 8000

/* susclk rate */
#define SUSCLK_RATE_32KHZ 32768

/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC

/* max retry count when init fails */
#define MAX_INIT_RETRIES 3

/* Controller dump header */
#define QCA_SSR_DUMP_HANDLE 0x0108
#define QCA_DUMP_PACKET_SIZE 255
#define QCA_LAST_SEQUENCE_NUM 0xFFFF
#define QCA_CRASHBYTE_PACKET_LEN 1096
#define QCA_MEMDUMP_BYTE 0xFB
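/* A memdump fragment whose seq_no equals QCA_LAST_SEQUENCE_NUM marks
 * the final packet of a controller dump.
 */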

enum qca_flags {
	QCA_IBS_ENABLED,
	QCA_DROP_VENDOR_EVENT,
	QCA_SUSPENDING,
	QCA_MEMDUMP_COLLECTION,
	QCA_HW_ERROR_EVENT,
	QCA_SSR_TRIGGERED
};

enum qca_capabilities {
	QCA_CAP_WIDEBAND_SPEECH = BIT(0),
	QCA_CAP_VALID_LE_STATES = BIT(1),
};

/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};
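/* Expected TX transitions: ASLEEP -> WAKING when a wake is needed and
 * HCI_IBS_WAKE_IND is sent, WAKING -> AWAKE on HCI_IBS_WAKE_ACK from
 * the controller, and AWAKE -> ASLEEP when the tx_idle_timer expires
 * and HCI_IBS_SLEEP_IND is sent.
 */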

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};
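/* The serial clock is kept on while either side holds a vote:
 * serial_clock_vote() ORs the TX and RX votes to decide whether the
 * UART clock may be released.
 */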

/* Controller memory dump states */
enum qca_memdump_states {
	QCA_MEMDUMP_IDLE,
	QCA_MEMDUMP_COLLECTING,
	QCA_MEMDUMP_COLLECTED,
	QCA_MEMDUMP_TIMEOUT,
};

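/* memdump_buf_head points at the start of the allocated dump buffer,
 * while memdump_buf_tail tracks the current write position as dump
 * fragments arrive from the controller.
 */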
struct qca_memdump_data {
	char *memdump_buf_head;
	char *memdump_buf_tail;
	u32 current_seq_no;
	u32 received_dump;
	u32 ram_dump_size;
};

struct qca_memdump_event_hdr {
	__u8 evt;
	__u8 plen;
	__u16 opcode;
	__u16 seq_no;
	__u8 reserved;
} __packed;

struct qca_dump_size {
	u32 dump_size;
} __packed;

struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state */
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	struct work_struct ctrl_memdump_evt;
	struct delayed_work ctrl_memdump_timeout;
	struct qca_memdump_data *qca_memdump;
	unsigned long flags;
	struct completion drop_ev_comp;
	wait_queue_head_t suspend_wait_q;
	enum qca_memdump_states memdump_state;
	struct mutex hci_memdump_lock;

	/* For debugging purposes */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};

enum qca_speed_type {
	QCA_INIT_SPEED = 1,
	QCA_OPER_SPEED
};
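/* QCA_INIT_SPEED is the baud rate used while the controller boots and
 * firmware is downloaded; the UART is then switched to QCA_OPER_SPEED
 * for normal operation.
 */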

/*
 * Voltage regulator information required for configuring the
 * QCA Bluetooth chipset
 */
struct qca_vreg {
	const char *name;
	unsigned int load_uA;
};

struct qca_device_data {
	enum qca_btsoc_type soc_type;
	struct qca_vreg *vregs;
	size_t num_vregs;
	uint32_t capabilities;
};

/*
 * Platform data for the QCA Bluetooth power driver.
 */
struct qca_power {
	struct device *dev;
	struct regulator_bulk_data *vreg_bulk;
	int num_vregs;
	bool vregs_on;
};

struct qca_serdev {
	struct hci_uart serdev_hu;
	struct gpio_desc *bt_en;
	struct clk *susclk;
	enum qca_btsoc_type btsoc_type;
	struct qca_power *bt_power;
	u32 init_speed;
	u32 oper_speed;
	const char *firmware_name;
};

static int qca_regulator_enable(struct qca_serdev *qcadev);
static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
static void qca_controller_memdump(struct work_struct *work);

static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
	enum qca_btsoc_type soc_type;

	if (hu->serdev) {
		struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);

		soc_type = qsd->btsoc_type;
	} else {
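		/* Instances attached via the tty line discipline carry no
		 * serdev data, so fall back to ROME behaviour.
		 */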
		soc_type = QCA_ROME;
	}

	return soc_type;
}

static const char *qca_get_firmware_name(struct hci_uart *hu)
{
	if (hu->serdev) {
		struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);

		return qsd->firmware_name;
	} else {
		return NULL;
	}
}

static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be enabled on the
	 * host side to save power, which may need manual intervention.
	 * Add code to turn the UART clock on here if needed.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be disabled on the
	 * host side to save power, which may need manual intervention.
	 * Add code to turn the UART clock off here if needed.
	 */
}

/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	new_vote = qca->rx_vote | qca->tx_vote;

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}

/* Builds and queues an HCI_IBS command packet.
 * These are very simple packets with only one command byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);

	return err;
}

static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;
	unsigned long flags;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge a device wake up; sending an IBS message
	 * doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that the messages are queued to the tty driver, vote for the
	 * tty clocks to go off. It is up to the tty driver to defer the
	 * clocks-off until tx is done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}

static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
	       hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	/* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to send WAKE to device");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}

static void qca_controller_memdump_timeout(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_timeout.work);
	struct hci_uart *hu = qca->hu;

	mutex_lock(&qca->hci_memdump_lock);
	if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}

	mutex_unlock(&qca->hci_memdump_lock);
}

/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

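	/* In-Band Sleep relies on hardware flow control so that bytes
	 * are not lost while either side is asleep or still waking up.
	 */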
	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	skb_queue_head_init(&qca->rx_memdump_q);
	spin_lock_init(&qca->hci_ibs_lock);
	mutex_init(&qca->hci_memdump_lock);
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
			  qca_controller_memdump_timeout);
	init_waitqueue_head(&qca->suspend_wait_q);

	qca->hu = hu;
	init_completion(&qca->drop_ev_comp);

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	qca->vote_last_jif = jiffies;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);

		if (qca_is_wcn399x(qcadev->btsoc_type))
			hu->init_speed = qcadev->init_speed;

		if (qcadev->oper_speed)
			hu->oper_speed = qcadev->oper_speed;
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}

static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	skb_queue_purge(&qca->rx_memdump_q);
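	/* Stop the timers and drain the workqueue before freeing qca so
	 * that no callback can run against freed state.
	 */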
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	/* Don't wake the rx up when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have voted it off
		 * since the last wake-up indication. The awake-rx work
		 * votes the RX clock on and then sends the WAKE_ACK.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge a device wake up; sending an IBS
		 * message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

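	/* A system suspend may be waiting for the controller to go to
	 * sleep; let it re-check the IBS state.
	 */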
	wake_up_interruptible(&qca->suspend_wait_q);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	/* Don't react to the wake-up-acknowledgment when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Enqueue frame for transmission (padding, crc, etc); may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * called from two simultaneous tasklets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned long flags = 0, idle_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) qca->tx_ibs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* As SSR is in progress, ignore the packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) bt_dev_dbg(hu->hdev, "SSR is in progress");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Prepend skb with frame type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) spin_lock_irqsave(&qca->hci_ibs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Don't go to sleep in the middle of a patch download, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * when Out-Of-Band (GPIO-controlled) sleep is selected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * Don't wake the device up when suspending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) test_bit(QCA_SUSPENDING, &qca->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) skb_queue_tail(&qca->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* Act according to current state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) switch (qca->tx_ibs_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case HCI_IBS_TX_AWAKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) BT_DBG("Device awake, sending normally");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) skb_queue_tail(&qca->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case HCI_IBS_TX_ASLEEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) BT_DBG("Device asleep, waking up and queueing packet");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Save packet for later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) skb_queue_tail(&qca->tx_wait_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) qca->tx_ibs_state = HCI_IBS_TX_WAKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* Schedule a work queue to wake up device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) queue_work(qca->workqueue, &qca->ws_awake_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) case HCI_IBS_TX_WAKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) BT_DBG("Device waking up, queueing packet");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Transient state; just keep packet for later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) skb_queue_tail(&qca->tx_wait_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) BT_ERR("Illegal tx state: %d (losing packet)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) qca->tx_ibs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
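/* RX handlers for the three single-byte HCI_IBS messages. Each one
 * delegates to the matching state-machine helper above and then
 * consumes the skb.
 */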
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) device_want_to_sleep(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) device_want_to_wakeup(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) device_woke_up(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* We receive debug logs from the chip as ACL packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * Instead of handing the data to the ACL layer to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * decoded, push it to the upper layers as a diagnostic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return hci_recv_diag(hdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return hci_recv_frame(hdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
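/* Deferred work that drains rx_memdump_q and reassembles the memory
 * dump fragments sent by the controller into one linear buffer. When
 * the fragment tagged QCA_LAST_SEQUENCE_NUM arrives, the buffer is
 * handed to devcoredump and the collection state is finalized.
 */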
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static void qca_controller_memdump(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct qca_data *qca = container_of(work, struct qca_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ctrl_memdump_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct hci_uart *hu = qca->hu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct qca_memdump_event_hdr *cmd_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct qca_memdump_data *qca_memdump = qca->qca_memdump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct qca_dump_size *dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) char *memdump_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) char null_buff[QCA_DUMP_PACKET_SIZE] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) u16 seq_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) u32 dump_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) u32 rx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) enum qca_btsoc_type soc_type = qca_soc_type(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mutex_lock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* Skip processing the received packets if a timeout was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * detected or the memdump collection has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (!qca_memdump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!qca_memdump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) qca->qca_memdump = qca_memdump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) qca->memdump_state = QCA_MEMDUMP_COLLECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) cmd_hdr = (void *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) seq_no = __le16_to_cpu(cmd_hdr->seq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!seq_no) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* This is the first frame of the memdump packet from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * the controller. Disable IBS so the dump is received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * without any interruption; ideally the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * needs 8 seconds to send the dump. Start a timer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * handle this asynchronous activity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) clear_bit(QCA_IBS_ENABLED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) dump = (void *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dump_size = __le32_to_cpu(dump->dump_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!dump_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) bt_dev_err(hu->hdev, "Rx invalid memdump size");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) kfree(qca_memdump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) qca->qca_memdump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) dump_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) queue_delayed_work(qca->workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) &qca->ctrl_memdump_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) skb_pull(skb, sizeof(dump_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) memdump_buf = vmalloc(dump_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) qca_memdump->ram_dump_size = dump_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) qca_memdump->memdump_buf_head = memdump_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) qca_memdump->memdump_buf_tail = memdump_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) memdump_buf = qca_memdump->memdump_buf_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* If sequence number 0 was missed then there is no point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * in accepting the remaining sequences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!memdump_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) bt_dev_err(hu->hdev, "QCA: Discarding other packets");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) kfree(qca_memdump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) qca->qca_memdump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Some packets from the controller may be missing; in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * case store dummy packets in the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * For QCA6390 the controller does not lose packets, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * sequence number field of a packet sometimes has error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * bits, so skip the missing-packet check for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) while ((seq_no > qca_memdump->current_seq_no + 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) (soc_type != QCA_QCA6390) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) seq_no != QCA_LAST_SEQUENCE_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) qca_memdump->current_seq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) rx_size = qca_memdump->received_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) rx_size += QCA_DUMP_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (rx_size > qca_memdump->ram_dump_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) bt_dev_err(hu->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) "QCA memdump received %d, no space for missed packet",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) qca_memdump->received_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) memcpy(memdump_buf, null_buff, QCA_DUMP_PACKET_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) qca_memdump->current_seq_no++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rx_size = qca_memdump->received_dump + skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (rx_size <= qca_memdump->ram_dump_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) (seq_no != qca_memdump->current_seq_no))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) bt_dev_err(hu->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) "QCA memdump unexpected packet %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) seq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) bt_dev_dbg(hu->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) "QCA memdump packet %d with length %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) seq_no, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) memcpy(memdump_buf, (unsigned char *)skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) memdump_buf = memdump_buf + skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) qca_memdump->memdump_buf_tail = memdump_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) qca_memdump->current_seq_no = seq_no + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) qca_memdump->received_dump += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) bt_dev_err(hu->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) "QCA memdump received %d, no space for packet %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) qca_memdump->received_dump, seq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) qca->qca_memdump = qca_memdump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (seq_no == QCA_LAST_SEQUENCE_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) bt_dev_info(hu->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) "QCA memdump Done, received %d, total %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) qca_memdump->received_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) qca_memdump->ram_dump_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) memdump_buf = qca_memdump->memdump_buf_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dev_coredumpv(&hu->serdev->dev, memdump_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) qca_memdump->received_dump, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) cancel_delayed_work(&qca->ctrl_memdump_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) kfree(qca->qca_memdump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) qca->qca_memdump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) qca->memdump_state = QCA_MEMDUMP_COLLECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int qca_controller_memdump_event(struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) set_bit(QCA_SSR_TRIGGERED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) skb_queue_tail(&qca->rx_memdump_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct hci_event_hdr *hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* For the WCN3990 the vendor command for a baudrate change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * isn't sent as a synchronous HCI command, because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * controller sends the corresponding vendor event with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * new baudrate. The event is received and properly decoded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * after changing the baudrate of the host port. It needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * be dropped, otherwise it can be misinterpreted as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * response to a later firmware download command (also a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * vendor command).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (hdr->evt == HCI_EV_VENDOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) complete(&qca->drop_ev_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* We receive a chip memory dump as an event packet, with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * dedicated handler, followed by a hardware error event. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * this event is received we store the dump in a file before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * closing hci. This dump will help in triaging the issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if ((skb->data[0] == HCI_VENDOR_PKT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return qca_controller_memdump_event(hdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return hci_recv_frame(hdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
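/* The HCI_IBS messages are single bytes, so the h4_recv_pkt
 * descriptors below carry no header and no length field (hlen, loff
 * and lsize are all zero).
 */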
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) #define QCA_IBS_SLEEP_IND_EVENT \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) .type = HCI_IBS_SLEEP_IND, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .hlen = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) .loff = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) .lsize = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) .maxlen = HCI_MAX_IBS_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) #define QCA_IBS_WAKE_IND_EVENT \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) .type = HCI_IBS_WAKE_IND, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) .hlen = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) .loff = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) .lsize = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .maxlen = HCI_MAX_IBS_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) #define QCA_IBS_WAKE_ACK_EVENT \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) .type = HCI_IBS_WAKE_ACK, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) .hlen = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) .loff = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .lsize = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) .maxlen = HCI_MAX_IBS_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static const struct h4_recv_pkt qca_recv_pkts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) { H4_RECV_ACL, .recv = qca_recv_acl_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) { H4_RECV_SCO, .recv = hci_recv_frame },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) { H4_RECV_EVENT, .recv = qca_recv_event },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
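/* Receive callback from the UART core: feed the incoming bytes
 * through the common H4 reassembler using the table above.
 */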
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static int qca_recv(struct hci_uart *hu, const void *data, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -EUNATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (IS_ERR(qca->rx_skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int err = PTR_ERR(qca->rx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) qca->rx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
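/* Hand the next queued frame to the UART core for transmission. */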
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static struct sk_buff *qca_dequeue(struct hci_uart *hu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return skb_dequeue(&qca->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
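/* Map a standard UART speed to the vendor baudrate code used in the
 * baudrate-change command; unknown speeds fall back to 115200.
 */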
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static uint8_t qca_get_baudrate_value(int speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) switch (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) case 9600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return QCA_BAUDRATE_9600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) case 19200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return QCA_BAUDRATE_19200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) case 38400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return QCA_BAUDRATE_38400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) case 57600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return QCA_BAUDRATE_57600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) case 115200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return QCA_BAUDRATE_115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) case 230400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return QCA_BAUDRATE_230400;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case 460800:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return QCA_BAUDRATE_460800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case 500000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return QCA_BAUDRATE_500000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) case 921600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return QCA_BAUDRATE_921600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) case 1000000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return QCA_BAUDRATE_1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) case 2000000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return QCA_BAUDRATE_2000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) case 3000000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return QCA_BAUDRATE_3000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) case 3200000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return QCA_BAUDRATE_3200000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) case 3500000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return QCA_BAUDRATE_3500000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return QCA_BAUDRATE_115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
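/* Queue the vendor baudrate-change request and wait until it has
 * fully left the UART; the caller then switches the host baudrate.
 */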
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (baudrate > QCA_BAUDRATE_3200000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) cmd[4] = baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) bt_dev_err(hdev, "Failed to allocate baudrate packet");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* Fill in the baudrate-change command and set the packet type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) skb_put_data(skb, cmd, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) skb_queue_tail(&qca->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) hci_uart_tx_wakeup(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* Wait for the baudrate change request to be sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) while (!skb_queue_empty(&qca->txq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) usleep_range(100, 200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (hu->serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) serdev_device_wait_until_sent(hu->serdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Give the controller time to process the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (qca_is_wcn399x(qca_soc_type(hu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) msleep(300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (hu->serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) serdev_device_set_baudrate(hu->serdev, speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) hci_uart_set_baudrate(hu, speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static int qca_send_power_pulse(struct hci_uart *hu, bool on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* These power pulses are single-byte commands sent at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * required baudrate to the wcn3990, where an external circuit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * at the Tx pin decodes a pulse sent at a specific baudrate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * For example, the wcn3990 supports an RF COEX antenna for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * both Wi-Fi/BT, and the same power inputs are used to turn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * Wi-Fi/BT on and off: powering up the power sources will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * enable BT until we send a power-on pulse at 115200 bps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * This scheme helps to save power. Disabling hardware flow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * control is mandatory while sending power pulses to the SoC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) serdev_device_write_flush(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) hci_uart_set_flow_control(hu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) serdev_device_wait_until_sent(hu->serdev, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) hci_uart_set_flow_control(hu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /* Give the controller time to boot/shut down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
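/* Return the configured init or operating speed, preferring the
 * per-device value over the protocol default; 0 means not set.
 */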
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static unsigned int qca_get_speed(struct hci_uart *hu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) enum qca_speed_type speed_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) unsigned int speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (speed_type == QCA_INIT_SPEED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (hu->init_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) speed = hu->init_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) else if (hu->proto->init_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) speed = hu->proto->init_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (hu->oper_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) speed = hu->oper_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) else if (hu->proto->oper_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) speed = hu->proto->oper_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int qca_check_speeds(struct hci_uart *hu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (qca_is_wcn399x(qca_soc_type(hu))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) !qca_get_speed(hu, QCA_OPER_SPEED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) !qca_get_speed(hu, QCA_OPER_SPEED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
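/* Switch to the requested speed class. The init speed only retargets
 * the host UART, while the operating speed must be applied to the
 * controller first (via the vendor command) and then to the host.
 */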
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) unsigned int speed, qca_baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (speed_type == QCA_INIT_SPEED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) speed = qca_get_speed(hu, QCA_INIT_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) host_set_baudrate(hu, speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) enum qca_btsoc_type soc_type = qca_soc_type(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) speed = qca_get_speed(hu, QCA_OPER_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (!speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* Disable flow control for wcn3990 to deassert RTS while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * changing the baudrate of chip and host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (qca_is_wcn399x(soc_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) hci_uart_set_flow_control(hu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (soc_type == QCA_WCN3990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) reinit_completion(&qca->drop_ev_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) qca_baudrate = qca_get_baudrate_value(speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ret = qca_set_baudrate(hu->hdev, qca_baudrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) host_set_baudrate(hu, speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (qca_is_wcn399x(soc_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) hci_uart_set_flow_control(hu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (soc_type == QCA_WCN3990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /* Wait for the controller to send the vendor event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * for the baudrate change command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (!wait_for_completion_timeout(&qca->drop_ev_comp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) msecs_to_jiffies(100))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) bt_dev_err(hu->hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) "Failed to change controller baudrate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static int qca_send_crashbuffer(struct hci_uart *hu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /* We forcefully crash the controller by sending the 0xfb byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * 1024 times. Since we might also lose some data, to be on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * safer side we send 1096 bytes to the SoC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) QCA_CRASHBYTE_PACKET_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) skb_queue_tail(&qca->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) hci_uart_tx_wakeup(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static void qca_wait_for_dump_collection(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
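/* hdev->hw_error callback. Make sure a memory dump is collected (or
 * wait for one already in progress), then free the dump state if the
 * collection timed out.
 */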
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static void qca_hw_error(struct hci_dev *hdev, u8 code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) set_bit(QCA_SSR_TRIGGERED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /* If a hardware error event is received for something other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * than a QCA SoC memory dump event, we need to crash the SoC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * and wait here for 8 seconds to get the dump packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * This blocks the main thread until the dump is collected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) qca_send_crashbuffer(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) qca_wait_for_dump_collection(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /* Wait here until the memory dump is collected or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * memory dump timer expires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) bt_dev_info(hdev, "waiting for dump to complete");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) qca_wait_for_dump_collection(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) mutex_lock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (qca->qca_memdump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) vfree(qca->qca_memdump->memdump_buf_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) kfree(qca->qca_memdump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) qca->qca_memdump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) cancel_delayed_work(&qca->ctrl_memdump_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) cancel_work_sync(&qca->ctrl_memdump_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) skb_queue_purge(&qca->rx_memdump_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
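/* HCI command timeout handler. Trigger a crash dump if none is in
 * progress, wait for it to complete, and if the dump was still not
 * collected, inject a hw error event to reset the device and driver.
 */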
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static void qca_cmd_timeout(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) set_bit(QCA_SSR_TRIGGERED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) qca_send_crashbuffer(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) qca_wait_for_dump_collection(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) /* Wait here until the memory dump is collected or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * memory dump timer expires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) bt_dev_info(hdev, "waiting for dump to complete");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) qca_wait_for_dump_collection(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) mutex_lock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* Inject a hw error event to reset the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) * and driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) hci_reset_dev(hu->hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) mutex_unlock(&qca->hci_memdump_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
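/* Power-sequence the wcn3990: re-enable the regulators if hci down
 * turned them off, send the power-off and power-on pulses at the
 * appropriate baudrates, then reopen the port to resynchronize
 * RTS/CTS with the now-running controller.
 */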
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) static int qca_wcn3990_init(struct hci_uart *hu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct qca_serdev *qcadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* Check the vregs status; hci down may have turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * off the voltage regulators.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) qcadev = serdev_device_get_drvdata(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (!qcadev->bt_power->vregs_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) serdev_device_close(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ret = qca_regulator_enable(qcadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ret = serdev_device_open(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) bt_dev_err(hu->hdev, "failed to open port");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) /* Forcefully enable wcn3990 to enter boot mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) host_set_baudrate(hu, 2400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) ret = qca_send_power_pulse(hu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) qca_set_speed(hu, QCA_INIT_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) ret = qca_send_power_pulse(hu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /* Now the device is ready to communicate with the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * To sync the host with the device we need to reopen the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * port. Without this, we will have RTS and CTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * synchronization issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) serdev_device_close(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ret = serdev_device_open(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) bt_dev_err(hu->hdev, "failed to open port");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) hci_uart_set_flow_control(hu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static int qca_power_on(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) enum qca_btsoc_type soc_type = qca_soc_type(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) struct qca_serdev *qcadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
	/* A non-serdev device is usually powered externally and needs
	 * no additional power-on handling in the driver.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (!hu->serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (qca_is_wcn399x(soc_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ret = qca_wcn3990_init(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) qcadev = serdev_device_get_drvdata(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (qcadev->bt_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) gpiod_set_value_cansleep(qcadev->bt_en, 1);
			/* The controller needs time to boot up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) msleep(150);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static int qca_setup(struct hci_uart *hu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) struct hci_dev *hdev = hu->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) unsigned int retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) enum qca_btsoc_type soc_type = qca_soc_type(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) const char *firmware_name = qca_get_firmware_name(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) int soc_ver = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) ret = qca_check_speeds(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /* Patch downloading has to be done without IBS mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) clear_bit(QCA_IBS_ENABLED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /* Enable controller to do both LE scan and BR/EDR inquiry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * simultaneously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) bt_dev_info(hdev, "setting up %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) qca->memdump_state = QCA_MEMDUMP_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ret = qca_power_on(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
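	/* Clear any subsystem-restart state left over from an earlier
	 * controller crash before (re)initializing.
	 */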
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (qca_is_wcn399x(soc_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) qca_set_speed(hu, QCA_INIT_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* Setup user speed if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) speed = qca_get_speed(hu, QCA_OPER_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) ret = qca_set_speed(hu, QCA_OPER_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) qca_baudrate = qca_get_baudrate_value(speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!qca_is_wcn399x(soc_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) /* Get QCA version information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /* Setup patch / NVM configurations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) firmware_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) set_bit(QCA_IBS_ENABLED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) qca_debugfs_init(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) hu->hdev->hw_error = qca_hw_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) hu->hdev->cmd_timeout = qca_cmd_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) } else if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /* No patch/nvm-config found, run with original fw/config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) } else if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * Userspace firmware loader will return -EAGAIN in case no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * patch/nvm-config is found, so run with original fw/config.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) } else {
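		/* Any other failure: power-cycle the controller and, if
		 * possible, reopen the port, then retry the whole setup
		 * a bounded number of times.
		 */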
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (retries < MAX_INIT_RETRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) qca_power_shutdown(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (hu->serdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) serdev_device_close(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) ret = serdev_device_open(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) bt_dev_err(hdev, "failed to open port");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) /* Setup bdaddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (soc_type == QCA_ROME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) hu->hdev->set_bdaddr = qca_set_bdaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static const struct hci_uart_proto qca_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) .id = HCI_UART_QCA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) .name = "QCA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) .manufacturer = 29,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) .init_speed = 115200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) .oper_speed = 3000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) .open = qca_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) .close = qca_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) .flush = qca_flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) .setup = qca_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) .recv = qca_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) .enqueue = qca_enqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) .dequeue = qca_dequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static const struct qca_device_data qca_soc_data_wcn3990 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) .soc_type = QCA_WCN3990,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) .vregs = (struct qca_vreg []) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) { "vddio", 15000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) { "vddxo", 80000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) { "vddrf", 300000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) { "vddch0", 450000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) .num_vregs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static const struct qca_device_data qca_soc_data_wcn3991 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) .soc_type = QCA_WCN3991,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) .vregs = (struct qca_vreg []) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) { "vddio", 15000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) { "vddxo", 80000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) { "vddrf", 300000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) { "vddch0", 450000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) .num_vregs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static const struct qca_device_data qca_soc_data_wcn3998 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) .soc_type = QCA_WCN3998,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) .vregs = (struct qca_vreg []) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) { "vddio", 10000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) { "vddxo", 80000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) { "vddrf", 300000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) { "vddch0", 450000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) .num_vregs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) static const struct qca_device_data qca_soc_data_qca6390 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) .soc_type = QCA_QCA6390,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) .num_vregs = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) static void qca_power_shutdown(struct hci_uart *hu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) struct qca_serdev *qcadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) enum qca_btsoc_type soc_type = qca_soc_type(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
	/* From this point on we go into the power-off state, but the
	 * serial port is still open: stop queueing IBS data and flush
	 * all the data buffered in skbs.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) spin_lock_irqsave(&qca->hci_ibs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) clear_bit(QCA_IBS_ENABLED, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) qca_flush(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
	/* A non-serdev device is usually powered externally and needs
	 * no additional power-down handling in the driver.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (!hu->serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) qcadev = serdev_device_get_drvdata(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
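	/* wcn399x needs the power-off pulse sent at 2400 baud before
	 * its regulators are disabled; other SoCs are powered down via
	 * the enable GPIO, if one is present.
	 */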
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (qca_is_wcn399x(soc_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) host_set_baudrate(hu, 2400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) qca_send_power_pulse(hu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) qca_regulator_disable(qcadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) } else if (qcadev->bt_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) gpiod_set_value_cansleep(qcadev->bt_en, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) static int qca_power_off(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct hci_uart *hu = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) enum qca_btsoc_type soc_type = qca_soc_type(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) hu->hdev->hw_error = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) hu->hdev->cmd_timeout = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) del_timer_sync(&qca->wake_retrans_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) del_timer_sync(&qca->tx_idle_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
	/* Skip the pre-shutdown command if the SoC has crashed
	 * (i.e. a memory dump is in progress or was collected).
	 */
	if (soc_type != QCA_ROME &&
	    qca->memdump_state == QCA_MEMDUMP_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) qca_send_pre_shutdown_cmd(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) usleep_range(8000, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) qca_power_shutdown(hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static int qca_regulator_enable(struct qca_serdev *qcadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct qca_power *power = qcadev->bt_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /* Already enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (power->vregs_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
	BT_DBG("enabling %d regulators", power->num_vregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) power->vregs_on = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
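	/* The sleep clock has to run as well; roll the regulators back
	 * if it fails to start.
	 */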
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) ret = clk_prepare_enable(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) qca_regulator_disable(qcadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) static void qca_regulator_disable(struct qca_serdev *qcadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct qca_power *power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (!qcadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) power = qcadev->bt_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /* Already disabled? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (!power->vregs_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) power->vregs_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) clk_disable_unprepare(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static int qca_init_regulators(struct qca_power *qca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) const struct qca_vreg *vregs, size_t num_vregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct regulator_bulk_data *bulk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (!bulk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) for (i = 0; i < num_vregs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) bulk[i].supply = vregs[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
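	/* Declare the expected load on every supply so the regulator
	 * framework can choose a suitable operating mode.
	 */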
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) for (i = 0; i < num_vregs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) qca->vreg_bulk = bulk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) qca->num_vregs = num_vregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static int qca_serdev_probe(struct serdev_device *serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct qca_serdev *qcadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) const struct qca_device_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) bool power_ctrl_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (!qcadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) qcadev->serdev_hu.serdev = serdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) data = device_get_match_data(&serdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) serdev_device_set_drvdata(serdev, qcadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) device_property_read_string(&serdev->dev, "firmware-name",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) &qcadev->firmware_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) device_property_read_u32(&serdev->dev, "max-speed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) &qcadev->oper_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (!qcadev->oper_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) BT_DBG("UART will pick default operating speed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
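	/* wcn399x SoCs are powered via regulators managed by this
	 * driver; all other supported SoCs use an optional enable GPIO
	 * and sleep clock instead.
	 */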
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (data && qca_is_wcn399x(data->soc_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) qcadev->btsoc_type = data->soc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) qcadev->bt_power = devm_kzalloc(&serdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) sizeof(struct qca_power),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (!qcadev->bt_power)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) qcadev->bt_power->dev = &serdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) err = qca_init_regulators(qcadev->bt_power, data->vregs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) data->num_vregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) BT_ERR("Failed to init regulators:%d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) qcadev->bt_power->vregs_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (IS_ERR(qcadev->susclk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) dev_err(&serdev->dev, "failed to acquire clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return PTR_ERR(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) BT_ERR("wcn3990 serdev registration failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) qcadev->btsoc_type = data->soc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) qcadev->btsoc_type = QCA_ROME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (IS_ERR_OR_NULL(qcadev->bt_en)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) power_ctrl_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (IS_ERR(qcadev->susclk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dev_warn(&serdev->dev, "failed to acquire clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return PTR_ERR(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) err = clk_prepare_enable(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) BT_ERR("Rome serdev registration failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) clk_disable_unprepare(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) hdev = qcadev->serdev_hu.hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (power_ctrl_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) hdev->shutdown = qca_power_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (data) {
		/* Wideband speech support must be set per driver since it can't
		 * be queried via HCI. Same with the valid LE states quirk.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (data->capabilities & QCA_CAP_VALID_LE_STATES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) static void qca_serdev_remove(struct serdev_device *serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) struct qca_power *power = qcadev->bt_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) qca_power_shutdown(&qcadev->serdev_hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) else if (qcadev->susclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) clk_disable_unprepare(qcadev->susclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) hci_uart_unregister_device(&qcadev->serdev_hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static void qca_serdev_shutdown(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) struct serdev_device *serdev = to_serdev_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) const u8 ibs_wake_cmd[] = { 0xFD };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
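	/* Wake a QCA6390 over IBS and issue an EDL soft reset so the
	 * controller is left in a known state for the next boot.
	 */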
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (qcadev->btsoc_type == QCA_QCA6390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) serdev_device_write_flush(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) sizeof(ibs_wake_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) serdev_device_wait_until_sent(serdev, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) usleep_range(8000, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) serdev_device_write_flush(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) sizeof(edl_reset_soc_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) serdev_device_wait_until_sent(serdev, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) usleep_range(8000, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) static int __maybe_unused qca_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) struct serdev_device *serdev = to_serdev_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct hci_uart *hu = &qcadev->serdev_hu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) bool tx_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) u8 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
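	/* Mark the transport as suspending; the IBS paths elsewhere in
	 * this driver check this flag before initiating new activity.
	 */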
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) set_bit(QCA_SUSPENDING, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
	/* The device is downloading a patch or doesn't support in-band sleep. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) cancel_work_sync(&qca->ws_awake_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) cancel_work_sync(&qca->ws_awake_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) spin_lock_irqsave_nested(&qca->hci_ibs_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) flags, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
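	/* Drive the Tx side of the IBS state machine to sleep before
	 * the system suspends.
	 */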
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) switch (qca->tx_ibs_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case HCI_IBS_TX_WAKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) del_timer(&qca->wake_retrans_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) case HCI_IBS_TX_AWAKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) del_timer(&qca->tx_idle_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) serdev_device_write_flush(hu->serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) cmd = HCI_IBS_SLEEP_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) BT_ERR("Failed to send SLEEP to device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) qca->ibs_sent_slps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) tx_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) case HCI_IBS_TX_ASLEEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
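	/* Make sure the SLEEP_IND has actually left the UART before
	 * dropping the Tx clock vote.
	 */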
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (tx_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) serdev_device_wait_until_sent(hu->serdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
	/* Wait for the HCI_IBS_SLEEP_IND sent by the device to indicate
	 * that its Tx is going to sleep, so that a late packet does not
	 * wake the system afterwards.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) clear_bit(QCA_SUSPENDING, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) static int __maybe_unused qca_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) struct serdev_device *serdev = to_serdev_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct hci_uart *hu = &qcadev->serdev_hu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) struct qca_data *qca = hu->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) clear_bit(QCA_SUSPENDING, &qca->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) static const struct of_device_id qca_bluetooth_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) { .compatible = "qcom,qca6174-bt" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) { .compatible = "qcom,qca9377-bt" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static struct serdev_device_driver qca_serdev_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) .probe = qca_serdev_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) .remove = qca_serdev_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) .name = "hci_uart_qca",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) .of_match_table = of_match_ptr(qca_bluetooth_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) .shutdown = qca_serdev_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) .pm = &qca_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) int __init qca_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) serdev_device_driver_register(&qca_serdev_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) return hci_uart_register_proto(&qca_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) int __exit qca_deinit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) serdev_device_driver_unregister(&qca_serdev_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return hci_uart_unregister_proto(&qca_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }