^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) BlueZ - Bluetooth protocol stack for Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) Copyright (C) 2000-2001 Qualcomm Incorporated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) it under the terms of the GNU General Public License version 2 as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) published by the Free Software Foundation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) SOFTWARE IS DISCLAIMED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /* Bluetooth HCI sockets. */
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
/* Registry of mgmt channels and the mutex serialising access to it
 * (registration/lookup happens outside this chunk).
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for the unique, non-zero per-socket monitor cookies */
static DEFINE_IDA(sock_cookie_ida);

/* Count of monitor sockets: hci_send_to_monitor() bails out while zero */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /* ----- HCI socket interface ----- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/* Socket info */
/* Cast a struct sock pointer to its HCI private area.  Valid because
 * struct hci_pinfo starts with struct bt_sock, which embeds struct sock.
 */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;	/* must stay first: enables the hci_pi() cast */
	struct hci_dev *hdev;	/* bound controller; NULL until bound */
	struct hci_filter filter;	/* RAW-channel packet filter, see is_filtered_packet() */
	__u8 cmsg_mask;	/* which ancillary data to deliver (used outside this chunk) */
	unsigned short channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long flags;	/* HCI_SOCK_* bits, see hci_sock_set_flag() et al. */
	__u32 cookie;	/* monitor cookie; 0 = not yet assigned */
	char comm[TASK_COMM_LEN];	/* task name captured when the cookie was created */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) struct hci_dev *hdev = hci_pi(sk)->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) if (!hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return ERR_PTR(-EBADFD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) return ERR_PTR(-EPIPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) void hci_sock_set_flag(struct sock *sk, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) set_bit(nr, &hci_pi(sk)->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) void hci_sock_clear_flag(struct sock *sk, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) clear_bit(nr, &hci_pi(sk)->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) int hci_sock_test_flag(struct sock *sk, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return test_bit(nr, &hci_pi(sk)->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) unsigned short hci_sock_get_channel(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return hci_pi(sk)->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) u32 hci_sock_get_cookie(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) return hci_pi(sk)->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) static bool hci_sock_gen_cookie(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) int id = hci_pi(sk)->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) if (!id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) if (id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) id = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) hci_pi(sk)->cookie = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) get_task_comm(hci_pi(sk)->comm, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static void hci_sock_free_cookie(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) int id = hci_pi(sk)->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) hci_pi(sk)->cookie = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) ida_simple_remove(&sock_cookie_ida, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static inline int hci_test_bit(int nr, const void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

/* Bitmaps describing which packet types, events and commands pass the
 * security filter.  Commands are indexed by OGF, then by OCF bit
 * position (4 x 32 bits per OGF).  Presumably applied to unprivileged
 * RAW sockets — enforcement is outside this chunk.
 */
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
/* Default security-filter bitmaps.  Each set bit whitelists one packet
 * type, event code or command OCF; everything else is blocked.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
/* List of all open HCI sockets, guarded by its embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct hci_filter *flt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) int flt_type, flt_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /* Apply filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) flt = &hci_pi(sk)->filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (!test_bit(flt_type, &flt->type_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) /* Extra filter for event packets only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) if (!hci_test_bit(flt_event, &flt->event_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /* Check filter only when opcode is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (!flt->opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) if (flt_event == HCI_EV_CMD_COMPLETE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if (flt_event == HCI_EV_CMD_STATUS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
/* Send frame to RAW socket */
/* Broadcast @skb from @hdev to every eligible RAW/USER socket bound to
 * that controller.  A single private copy (with the packet-type byte
 * pushed in front) is created lazily on first match and then cloned per
 * recipient; the original @skb is left untouched.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only sockets bound to this controller are eligible */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets only see the core packet types, and
			 * only what passes their per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sockets only see incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() takes ownership only on success */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /* Send frame to sockets with specific channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) int flag, struct sock *skip_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) BT_DBG("channel %u len %d", channel, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) sk_for_each(sk, &hci_sk_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) struct sk_buff *nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) /* Ignore socket without the flag set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (!hci_sock_test_flag(sk, flag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /* Skip the original socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) if (sk == skip_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (sk->sk_state != BT_BOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) if (hci_pi(sk)->channel != channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) nskb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) if (!nskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (sock_queue_rcv_skb(sk, nskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) kfree_skb(nskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
/* Locked wrapper around __hci_send_to_channel(): deliver @skb to every
 * socket bound to @channel with @flag set, except @skip_sk.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
/* Send frame to monitor socket */
/* Mirror @skb from @hdev onto the monitor channel, prefixed with a
 * hci_mon_hdr carrying the matching HCI_MON_* opcode and the controller
 * index.  Cheap no-op while no monitor socket is open.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map the packet type (and, for data packets, its direction) to
	 * the monitor opcode; unknown types are not traced.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
/* Trace a control-channel @event to the monitor channel.
 *
 * For each control-channel socket (except @skip_sk) carrying @flag, a
 * separate HCI_MON_CTRL_EVENT frame is built containing that socket's
 * cookie, @event and @data, stamped with @tstamp; @hdev selects the
 * index (MGMT_INDEX_NONE when NULL).
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 4 bytes cookie + 2 bytes event code + payload */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* Lockless variant: we already hold hci_sk_list.lock */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct hci_mon_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) struct hci_mon_new_index *ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct hci_mon_index_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) __le16 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) case HCI_DEV_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) ni->type = hdev->dev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) ni->bus = hdev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) bacpy(&ni->bdaddr, &hdev->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) memcpy(ni->name, hdev->name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) case HCI_DEV_UNREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) skb = bt_skb_alloc(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) case HCI_DEV_SETUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (hdev->manufacturer == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) case HCI_DEV_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) bacpy(&ii->bdaddr, &hdev->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ii->manufacturer = cpu_to_le16(hdev->manufacturer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) case HCI_DEV_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) skb = bt_skb_alloc(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) case HCI_DEV_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) skb = bt_skb_alloc(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) hdr = skb_push(skb, HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) hdr->opcode = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) hdr->index = cpu_to_le16(hdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct hci_mon_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) u16 format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) u8 ver[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* No message needed when cookie is not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (!hci_pi(sk)->cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) switch (hci_pi(sk)->channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) case HCI_CHANNEL_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) format = 0x0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) ver[0] = BT_SUBSYS_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) case HCI_CHANNEL_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) format = 0x0001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) ver[0] = BT_SUBSYS_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) case HCI_CHANNEL_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) format = 0x0002;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) mgmt_fill_version_info(ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) /* No message for unsupported format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) put_unaligned_le16(format, skb_put(skb, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) skb_put_data(skb, ver, sizeof(ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) put_unaligned_le32(flags, skb_put(skb, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) skb_put_u8(skb, TASK_COMM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) hdr = skb_push(skb, HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (hci_pi(sk)->hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) hdr->index = cpu_to_le16(HCI_DEV_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) struct hci_mon_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) /* No message needed when cookie is not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) if (!hci_pi(sk)->cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) switch (hci_pi(sk)->channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) case HCI_CHANNEL_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) case HCI_CHANNEL_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) case HCI_CHANNEL_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* No message for unsupported format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) skb = bt_skb_alloc(4, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) hdr = skb_push(skb, HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (hci_pi(sk)->hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) hdr->index = cpu_to_le16(HCI_DEV_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) u16 opcode, u16 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) const void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) struct hci_mon_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) put_unaligned_le16(opcode, skb_put(skb, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) skb_put_data(skb, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) hdr = skb_push(skb, HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) hdr->index = cpu_to_le16(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) static void __printf(2, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) send_monitor_note(struct sock *sk, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct hci_mon_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) len = vsnprintf(NULL, 0, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) vsprintf(skb_put(skb, len), fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) *(u8 *)skb_put(skb, 1) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) hdr->index = cpu_to_le16(HCI_DEV_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (sock_queue_rcv_skb(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) static void send_monitor_replay(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) read_lock(&hci_dev_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) list_for_each_entry(hdev, &hci_dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) skb = create_monitor_event(hdev, HCI_DEV_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (sock_queue_rcv_skb(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (!test_bit(HCI_RUNNING, &hdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) skb = create_monitor_event(hdev, HCI_DEV_OPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (sock_queue_rcv_skb(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (test_bit(HCI_UP, &hdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) skb = create_monitor_event(hdev, HCI_DEV_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) else if (hci_dev_test_flag(hdev, HCI_SETUP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) skb = create_monitor_event(hdev, HCI_DEV_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (sock_queue_rcv_skb(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) read_unlock(&hci_dev_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static void send_monitor_control_replay(struct sock *mon_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) read_lock(&hci_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) sk_for_each(sk, &hci_sk_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) skb = create_monitor_ctrl_open(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (sock_queue_rcv_skb(mon_sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) read_unlock(&hci_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) /* Generate internal stack event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct hci_event_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) struct hci_ev_stack_internal *ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) hdr->evt = HCI_EV_STACK_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) hdr->plen = sizeof(*ev) + dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ev = skb_put(skb, sizeof(*ev) + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) ev->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) memcpy(ev->data, data, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) bt_cb(skb)->incoming = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) hci_send_to_sock(hdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) void hci_sock_dev_event(struct hci_dev *hdev, int event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) BT_DBG("hdev %s event %d", hdev->name, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (atomic_read(&monitor_promisc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) skb = create_monitor_event(hdev, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (event <= HCI_DEV_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct hci_ev_si_device ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* Send event to sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) ev.event = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ev.dev_id = hdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (event == HCI_DEV_UNREG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /* Wake up sockets using this dead device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) read_lock(&hci_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) sk_for_each(sk, &hci_sk_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (hci_pi(sk)->hdev == hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) sk->sk_err = EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) read_unlock(&hci_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct hci_mgmt_chan *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) list_for_each_entry(c, &mgmt_chan_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (c->channel == channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct hci_mgmt_chan *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) mutex_lock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) c = __hci_mgmt_chan_find(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) mutex_unlock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (c->channel < HCI_CHANNEL_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) mutex_lock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (__hci_mgmt_chan_find(c->channel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mutex_unlock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) list_add_tail(&c->list, &mgmt_chan_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) mutex_unlock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) EXPORT_SYMBOL(hci_mgmt_chan_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) mutex_lock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) list_del(&c->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) mutex_unlock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) EXPORT_SYMBOL(hci_mgmt_chan_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static int hci_sock_release(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) BT_DBG("sock %p sk %p", sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) switch (hci_pi(sk)->channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) case HCI_CHANNEL_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) atomic_dec(&monitor_promisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) case HCI_CHANNEL_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) case HCI_CHANNEL_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) case HCI_CHANNEL_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) skb = create_monitor_ctrl_close(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) hci_sock_free_cookie(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) bt_sock_unlink(&hci_sk_list, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) hdev = hci_pi(sk)->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* When releasing a user channel exclusive access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * call hci_dev_do_close directly instead of calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * hci_dev_close to ensure the exclusive access will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * be released and the controller brought back down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * The checking of HCI_AUTO_OFF is not needed in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * case since it will have been cleared already when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * opening the user channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) hci_dev_do_close(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) mgmt_index_added(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) atomic_dec(&hdev->promisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) sock_orphan(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) skb_queue_purge(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bdaddr_t bdaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) bdaddr_t bdaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* Ioctls that require bound socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct hci_dev *hdev = hci_hdev_from_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (IS_ERR(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return PTR_ERR(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (hdev->dev_type != HCI_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) case HCISETRAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) case HCIGETCONNINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return hci_get_conn_info(hdev, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case HCIGETAUTHINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return hci_get_auth_info(hdev, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) case HCIBLOCKADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return hci_sock_blacklist_add(hdev, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) case HCIUNBLOCKADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return hci_sock_blacklist_del(hdev, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) BT_DBG("cmd %x arg %lx", cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) err = -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* When calling an ioctl on an unbound raw socket, then ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * that the monitor gets informed. Ensure that the resulting event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * is only send once by checking if the cookie exists or not. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * socket cookie will be only ever generated once for the lifetime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * of a given socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (hci_sock_gen_cookie(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) skb = create_monitor_ctrl_open(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case HCIGETDEVLIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return hci_get_dev_list(argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) case HCIGETDEVINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return hci_get_dev_info(argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) case HCIGETCONNLIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return hci_get_conn_list(argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) case HCIDEVUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return hci_dev_open(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) case HCIDEVDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return hci_dev_close(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) case HCIDEVRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return hci_dev_reset(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) case HCIDEVRESTAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return hci_dev_reset_stat(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) case HCISETSCAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) case HCISETAUTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) case HCISETENCRYPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case HCISETPTYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) case HCISETLINKPOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) case HCISETLINKMODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) case HCISETACLMTU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case HCISETSCOMTU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return hci_dev_cmd(cmd, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) case HCIINQUIRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return hci_inquiry(argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) err = hci_sock_bound_ioctl(sk, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) case HCIDEVUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) case HCIDEVDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) case HCIDEVRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) case HCIDEVRESTAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return hci_sock_ioctl(sock, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct sockaddr_hci haddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct hci_dev *hdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) int len, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) BT_DBG("sock %p sk %p", sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) memset(&haddr, 0, sizeof(haddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) len = min_t(unsigned int, sizeof(haddr), addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) memcpy(&haddr, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (haddr.hci_family != AF_BLUETOOTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* Allow detaching from dead device and attaching to alive device, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * the caller wants to re-bind (instead of close) this socket in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) hdev = hci_pi(sk)->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) hci_pi(sk)->hdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) sk->sk_state = BT_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) hdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (sk->sk_state == BT_BOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) err = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) switch (haddr.hci_channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) case HCI_CHANNEL_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (hci_pi(sk)->hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) err = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (haddr.hci_dev != HCI_DEV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) hdev = hci_dev_get(haddr.hci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) atomic_inc(&hdev->promisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) hci_pi(sk)->channel = haddr.hci_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (!hci_sock_gen_cookie(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* In the case when a cookie has already been assigned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * then there has been already an ioctl issued against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * an unbound socket and with that triggerd an open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * notification. Send a close notification first to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * allow the state transition to bounded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) skb = create_monitor_ctrl_close(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) hci_pi(sk)->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) skb = create_monitor_ctrl_open(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) case HCI_CHANNEL_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (hci_pi(sk)->hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) err = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (haddr.hci_dev == HCI_DEV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!capable(CAP_NET_ADMIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) hdev = hci_dev_get(haddr.hci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (test_bit(HCI_INIT, &hdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) hci_dev_test_flag(hdev, HCI_SETUP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) hci_dev_test_flag(hdev, HCI_CONFIG) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) test_bit(HCI_UP, &hdev->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) err = -EUSERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) mgmt_index_removed(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) err = hci_dev_open(hdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (err == -EALREADY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* In case the transport is already up and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * running, clear the error here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * This can happen when opening a user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * channel and HCI_AUTO_OFF grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * is still active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mgmt_index_added(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) hci_pi(sk)->channel = haddr.hci_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!hci_sock_gen_cookie(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* In the case when a cookie has already been assigned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * this socket will transition from a raw socket into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * a user channel socket. For a clean transition, send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * the close notification first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) skb = create_monitor_ctrl_close(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* The user channel is restricted to CAP_NET_ADMIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * capabilities and with that implicitly trusted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) hci_pi(sk)->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) skb = create_monitor_ctrl_open(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) atomic_inc(&hdev->promisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) case HCI_CHANNEL_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (haddr.hci_dev != HCI_DEV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (!capable(CAP_NET_RAW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) hci_pi(sk)->channel = haddr.hci_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* The monitor interface is restricted to CAP_NET_RAW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * capabilities and with that implicitly trusted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) send_monitor_note(sk, "Linux version %s (%s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) init_utsname()->release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) init_utsname()->machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) send_monitor_replay(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) send_monitor_control_replay(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) atomic_inc(&monitor_promisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) case HCI_CHANNEL_LOGGING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (haddr.hci_dev != HCI_DEV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (!capable(CAP_NET_ADMIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) hci_pi(sk)->channel = haddr.hci_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (!hci_mgmt_chan_find(haddr.hci_channel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (haddr.hci_dev != HCI_DEV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* Users with CAP_NET_ADMIN capabilities are allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * access to all management commands and events. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * untrusted users the interface is restricted and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * also only untrusted events are sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) hci_pi(sk)->channel = haddr.hci_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* At the moment the index and unconfigured index events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * are enabled unconditionally. Setting them on each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * socket when binding keeps this functionality. They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * however might be cleared later and then sending of these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * events will be disabled, but that is then intentional.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * This also enables generic events that are safe to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * received by untrusted users. Example for such events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * are changes to settings, class of device, name etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (!hci_sock_gen_cookie(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* In the case when a cookie has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * assigned, this socket will transtion from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * a raw socket into a control socket. To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * allow for a clean transtion, send the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * close notification first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) skb = create_monitor_ctrl_close(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) skb = create_monitor_ctrl_open(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) sk->sk_state = BT_BOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) BT_DBG("sock %p sk %p", sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) hdev = hci_hdev_from_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (IS_ERR(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) err = PTR_ERR(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) haddr->hci_family = AF_BLUETOOTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) haddr->hci_dev = hdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) haddr->hci_channel= hci_pi(sk)->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) err = sizeof(*haddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) __u8 mask = hci_pi(sk)->cmsg_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (mask & HCI_CMSG_DIR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) int incoming = bt_cb(skb)->incoming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) &incoming);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (mask & HCI_CMSG_TSTAMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct old_timeval32 ctv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) struct __kernel_old_timeval tv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) skb_get_timestamp(skb, &tv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) data = &tv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) len = sizeof(tv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (!COMPAT_USE_64BIT_TIME &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) (msg->msg_flags & MSG_CMSG_COMPAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ctv.tv_sec = tv.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) ctv.tv_usec = tv.tv_usec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) data = &ctv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) len = sizeof(ctv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) size_t len, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) int noblock = flags & MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) int copied, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) unsigned int skblen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) BT_DBG("sock %p, sk %p", sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (flags & MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (sk->sk_state == BT_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) skb = skb_recv_datagram(sk, flags, noblock, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) skblen = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) copied = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (len < copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) msg->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) copied = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) err = skb_copy_datagram_msg(skb, 0, msg, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) switch (hci_pi(sk)->channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) case HCI_CHANNEL_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) hci_sock_cmsg(sk, msg, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) case HCI_CHANNEL_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) case HCI_CHANNEL_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) sock_recv_timestamp(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (hci_mgmt_chan_find(hci_pi(sk)->channel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) sock_recv_timestamp(msg, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) skb_free_datagram(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (flags & MSG_TRUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) copied = skblen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return err ? : copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct msghdr *msg, size_t msglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) u8 *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct mgmt_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) u16 opcode, index, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct hci_dev *hdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) const struct hci_mgmt_handler *handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) bool var_len, no_hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) BT_DBG("got %zu bytes", msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (msglen < sizeof(*hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) buf = kmalloc(msglen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (memcpy_from_msg(buf, msg, msglen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) hdr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) opcode = __le16_to_cpu(hdr->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) index = __le16_to_cpu(hdr->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) len = __le16_to_cpu(hdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (len != msglen - sizeof(*hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (chan->channel == HCI_CHANNEL_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* Send event to monitor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) skb = create_monitor_ctrl_command(sk, index, opcode, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) buf + sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (opcode >= chan->handler_count ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) chan->handlers[opcode].func == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) BT_DBG("Unknown op %u", opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) MGMT_STATUS_UNKNOWN_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) handler = &chan->handlers[opcode];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) !(handler->flags & HCI_MGMT_UNTRUSTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) MGMT_STATUS_PERMISSION_DENIED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (index != MGMT_INDEX_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) hdev = hci_dev_get(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (hci_dev_test_flag(hdev, HCI_SETUP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) hci_dev_test_flag(hdev, HCI_CONFIG) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (no_hdev != !hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) var_len = (handler->flags & HCI_MGMT_VAR_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if ((var_len && len < handler->data_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) (!var_len && len != handler->data_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) err = mgmt_cmd_status(sk, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (hdev && chan->hdev_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) chan->hdev_init(sk, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) cp = buf + sizeof(*hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) err = handler->func(sk, hdev, cp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) err = msglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct hci_mon_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) u16 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* The logging frame consists at minimum of the standard header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * the priority byte, the ident length byte and at least one string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * terminator NUL byte. Anything shorter are invalid packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (len < sizeof(*hdr) + 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) hdr = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (__le16_to_cpu(hdr->opcode) == 0x0000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) __u8 priority = skb->data[sizeof(*hdr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) __u8 ident_len = skb->data[sizeof(*hdr) + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) /* Only the priorities 0-7 are valid and with that any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * value results in an invalid packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * The priority byte is followed by an ident length byte and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * the NUL terminated ident string. Check that the ident
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * length is not overflowing the packet and also that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * ident string itself is NUL terminated. In case the ident
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * length is zero, the length value actually doubles as NUL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * terminator identifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * The message follows the ident string (if present) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * must be NUL terminated. Otherwise it is not a valid packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (priority > 7 || skb->data[len - 1] != 0x00 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) ident_len > len - sizeof(*hdr) - 3 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) index = __le16_to_cpu(hdr->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (index != MGMT_INDEX_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) hdev = hci_dev_get(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) hdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) err = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) struct hci_mgmt_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) BT_DBG("sock %p sk %p", sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (msg->msg_flags & MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) MSG_CMSG_COMPAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (len < 4 || len > HCI_MAX_FRAME_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) switch (hci_pi(sk)->channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) case HCI_CHANNEL_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) case HCI_CHANNEL_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) case HCI_CHANNEL_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) case HCI_CHANNEL_LOGGING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) err = hci_logging_frame(sk, msg, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) mutex_lock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) err = hci_mgmt_cmd(chan, sk, msg, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) mutex_unlock(&mgmt_chan_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) hdev = hci_hdev_from_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (IS_ERR(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) err = PTR_ERR(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (!test_bit(HCI_UP, &hdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) err = -ENETDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) hci_skb_pkt_type(skb) = skb->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) skb_pull(skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) /* No permission check is needed for user channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * since that gets enforced when binding the socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * However check that the packet type is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) skb_queue_tail(&hdev->raw_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) queue_work(hdev->workqueue, &hdev->tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) u16 opcode = get_unaligned_le16(skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) u16 ogf = hci_opcode_ogf(opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) u16 ocf = hci_opcode_ocf(opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (((ogf > HCI_SFLT_MAX_OGF) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) &hci_sec_filter.ocf_mask[ogf])) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) !capable(CAP_NET_RAW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /* Since the opcode has already been extracted here, store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * a copy of the value for later use by the drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) hci_skb_opcode(skb) = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (ogf == 0x3f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) skb_queue_tail(&hdev->raw_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) queue_work(hdev->workqueue, &hdev->tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /* Stand-alone HCI commands must be flagged as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * single-command requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) skb_queue_tail(&hdev->cmd_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) queue_work(hdev->workqueue, &hdev->cmd_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (!capable(CAP_NET_RAW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) skb_queue_tail(&hdev->raw_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) queue_work(hdev->workqueue, &hdev->tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) err = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) sockptr_t optval, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct hci_ufilter uf = { .opcode = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) int err = 0, opt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) BT_DBG("sk %p, opt %d", sk, optname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (level != SOL_HCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) err = -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) case HCI_DATA_DIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) case HCI_TIME_STAMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) case HCI_FILTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct hci_filter *f = &hci_pi(sk)->filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) uf.type_mask = f->type_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) uf.opcode = f->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) uf.event_mask[0] = *((u32 *) f->event_mask + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) uf.event_mask[1] = *((u32 *) f->event_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) len = min_t(unsigned int, len, sizeof(uf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (copy_from_sockptr(&uf, optval, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (!capable(CAP_NET_RAW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) uf.type_mask &= hci_sec_filter.type_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct hci_filter *f = &hci_pi(sk)->filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) f->type_mask = uf.type_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) f->opcode = uf.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) *((u32 *) f->event_mask + 0) = uf.event_mask[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) *((u32 *) f->event_mask + 1) = uf.event_mask[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) err = -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) char __user *optval, int __user *optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct hci_ufilter uf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) int len, opt, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) BT_DBG("sk %p, opt %d", sk, optname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (level != SOL_HCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (get_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) err = -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) case HCI_DATA_DIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) opt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) opt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (put_user(opt, optval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) case HCI_TIME_STAMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) opt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) opt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) if (put_user(opt, optval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) case HCI_FILTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct hci_filter *f = &hci_pi(sk)->filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) memset(&uf, 0, sizeof(uf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) uf.type_mask = f->type_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) uf.opcode = f->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) uf.event_mask[0] = *((u32 *) f->event_mask + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) uf.event_mask[1] = *((u32 *) f->event_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) len = min_t(unsigned int, len, sizeof(uf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (copy_to_user(optval, &uf, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) err = -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) static const struct proto_ops hci_sock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) .family = PF_BLUETOOTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) .release = hci_sock_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) .bind = hci_sock_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) .getname = hci_sock_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) .sendmsg = hci_sock_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) .recvmsg = hci_sock_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) .ioctl = hci_sock_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) .compat_ioctl = hci_sock_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) .poll = datagram_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) .listen = sock_no_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) .shutdown = sock_no_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) .setsockopt = hci_sock_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) .getsockopt = hci_sock_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) .connect = sock_no_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) .socketpair = sock_no_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) .accept = sock_no_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) .mmap = sock_no_mmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static struct proto hci_sk_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) .name = "HCI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) .obj_size = sizeof(struct hci_pinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) int kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) BT_DBG("sock %p", sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (sock->type != SOCK_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return -ESOCKTNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) sock->ops = &hci_sock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) sock_init_data(sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) sock_reset_flag(sk, SOCK_ZAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) sk->sk_protocol = protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) sock->state = SS_UNCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) sk->sk_state = BT_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) bt_sock_link(&hci_sk_list, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static const struct net_proto_family hci_sock_family_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) .family = PF_BLUETOOTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) .create = hci_sock_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) int __init hci_sock_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) err = proto_register(&hci_sk_proto, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) BT_ERR("HCI socket registration failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) BT_ERR("Failed to create HCI proc file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) bt_sock_unregister(BTPROTO_HCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) BT_INFO("HCI socket layer initialized");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) proto_unregister(&hci_sk_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) void hci_sock_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) bt_procfs_cleanup(&init_net, "hci");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) bt_sock_unregister(BTPROTO_HCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) proto_unregister(&hci_sk_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }