/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
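
/* A minimal usage sketch for the request API above (illustrative only;
 * "example_complete" is a hypothetical callback, not a function defined
 * in this file):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, example_complete);
 *
 * On success the queued commands are spliced onto hdev->cmd_q and the
 * callback runs once the controller has answered the last command of
 * the batch.
 */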

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
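
/* Example of a blocking command exchange (a sketch; assumes the caller
 * may sleep, and uses HCI_INIT_TIMEOUT as a typical timeout):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data as struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 */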

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect HCI_UP
	 * against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
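
/* A minimal request-builder sketch for use with hci_req_sync() (the
 * function name "example_write_scan_enable" is hypothetical):
 *
 *	static int example_write_scan_enable(struct hci_request *req,
 *					     unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, example_write_scan_enable, SCAN_PAGE,
 *			   HCI_CMD_TIMEOUT, &status);
 */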

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
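
/* The resulting skb carries a standard HCI command packet: a 3-byte
 * header (16-bit opcode in little-endian order plus one parameter
 * length byte) followed by plen parameter bytes. As an illustration,
 * HCI_OP_WRITE_SCAN_ENABLE (0x0c1a) with a single 0x02 parameter byte
 * would serialize as: 1a 0c 01 02.
 */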

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval (0x0100 * 0.625 msec) */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections, we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
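
/* Sketch of the EIR layout produced above (byte values illustrative):
 * EIR is a sequence of {length, type, data} structures where the length
 * byte counts the type byte plus the data but not itself, e.g. for a
 * device named "BlueZ" advertising +4 dBm inquiry TX power:
 *
 *	06 09 'B' 'l' 'u' 'e' 'Z'	(EIR_NAME_COMPLETE)
 *	02 0a 04			(EIR_TX_POWER)
 */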

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the
	 * Privacy 1.2 features. Additionally, once we support Privacy 1.2
	 * and IRK offloading, we can update this to also check for those
	 * conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) list_for_each_entry(params, &hdev->pend_le_conns, action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (add_to_white_list(req, params, &num_entries, allow_rpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* After adding all new pending connections, walk through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * the list of pending reports and also add these to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * white list if there is still space. Abort if space runs out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) list_for_each_entry(params, &hdev->pend_le_reports, action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (add_to_white_list(req, params, &num_entries, allow_rpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
	/* Once controller offloading of advertisement monitoring is in place,
	 * the condition below should also check for support of the MSFT
	 * extension. If suspend is ongoing, the whitelist should remain the
	 * default to prevent waking by random advertisements.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Select filter policy to use white list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
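
/* The value returned by update_white_list() is the LE scan filter policy
 * to program: 0x00 scans without the white list (all advertisements are
 * reported), 0x01 limits reports to white-listed peers. An illustrative
 * sketch of the caller side (the real caller is
 * hci_req_add_le_passive_scan() below):
 *
 *	u8 filter_policy;
 *
 *	filter_policy = update_white_list(req);	/* 0x00 or 0x01 */
 */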
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static bool scan_use_rpa(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return hci_dev_test_flag(hdev, HCI_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) u16 window, u8 own_addr_type, u8 filter_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) bool addr_resolv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (hdev->scanning_paused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) bt_dev_dbg(hdev, "Scanning is paused for suspend");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (use_ll_privacy(hdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) addr_resolv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) u8 enable = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
	/* Use extended scanning if both the LE Set Extended Scan Parameters
	 * and LE Set Extended Scan Enable commands are supported.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (use_ext_scan(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct hci_cp_le_set_ext_scan_params *ext_param_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct hci_cp_le_scan_phy_params *phy_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) u32 plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ext_param_cp = (void *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) phy_params = (void *)ext_param_cp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) memset(ext_param_cp, 0, sizeof(*ext_param_cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ext_param_cp->own_addr_type = own_addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ext_param_cp->filter_policy = filter_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) plen = sizeof(*ext_param_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (scan_1m(hdev) || scan_2m(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) memset(phy_params, 0, sizeof(*phy_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) phy_params->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) phy_params->interval = cpu_to_le16(interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) phy_params->window = cpu_to_le16(window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) plen += sizeof(*phy_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) phy_params++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (scan_coded(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) memset(phy_params, 0, sizeof(*phy_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) phy_params->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) phy_params->interval = cpu_to_le16(interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) phy_params->window = cpu_to_le16(window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) plen += sizeof(*phy_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) phy_params++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) plen, ext_param_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ext_enable_cp.enable = LE_SCAN_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) sizeof(ext_enable_cp), &ext_enable_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct hci_cp_le_set_scan_param param_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct hci_cp_le_set_scan_enable enable_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
		memset(&param_cp, 0, sizeof(param_cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) param_cp.type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) param_cp.interval = cpu_to_le16(interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) param_cp.window = cpu_to_le16(window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) param_cp.own_address_type = own_addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) memset(&enable_cp, 0, sizeof(enable_cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) enable_cp.enable = LE_SCAN_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) &enable_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
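
/* An illustrative call (a sketch only; the values are examples, not
 * recommendations). Interval and window are in units of 0.625 ms, so
 * 0x0010 corresponds to 10 ms:
 *
 *	hci_req_start_scan(req, LE_SCAN_PASSIVE, 0x0010, 0x0010,
 *			   own_addr_type, filter_policy, true);
 */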
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
/* Returns true if an LE connection is in the scanning state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct hci_conn_hash *h = &hdev->conn_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct hci_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) list_for_each_entry_rcu(c, &h->list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (c->type == LE_LINK && c->state == BT_CONNECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) test_bit(HCI_CONN_SCANNING, &c->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
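
/* hci_req_add_le_passive_scan() below relies on this to switch to the
 * faster connect-oriented scan interval and window while a connection
 * attempt is pending.
 */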
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
/* Callers must invoke hci_req_add_le_scan_disable() first: controller-based
 * address resolution has to be disabled before the resolving list can be
 * reconfigured.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) void hci_req_add_le_passive_scan(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) u8 own_addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u8 filter_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) u16 window, interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* Background scanning should run with address resolution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) bool addr_resolv = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (hdev->scanning_paused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) bt_dev_dbg(hdev, "Scanning is paused for suspend");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (hci_update_random_address(req, false, scan_use_rpa(hdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) &own_addr_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* Adding or removing entries from the white list must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * happen before enabling scanning. The controller does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * not allow white list modification while scanning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) filter_policy = update_white_list(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
	/* When the controller is using random resolvable addresses and
	 * LE privacy is enabled, controllers that support the Extended
	 * Scanner Filter Policies can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) filter_policy |= 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (hdev->suspended) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) window = hdev->le_scan_window_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) interval = hdev->le_scan_int_suspend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) } else if (hci_is_le_conn_scanning(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) window = hdev->le_scan_window_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) interval = hdev->le_scan_int_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) } else if (hci_is_adv_monitoring(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) window = hdev->le_scan_window_adv_monitor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) interval = hdev->le_scan_int_adv_monitor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) window = hdev->le_scan_window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) interval = hdev->le_scan_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
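
	/* All interval/window values above are in units of 0.625 ms, and
	 * the window must never exceed the interval; the suspend, connect
	 * and advertisement-monitor cases trade scan duty cycle against
	 * discovery latency.
	 */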
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) own_addr_type, filter_policy, addr_resolv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
	/* Instance 0x00 always sets the local name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (instance == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return adv_instance->scan_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void hci_req_clear_event_filter(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct hci_cp_set_event_filter f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) memset(&f, 0, sizeof(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) f.flt_type = HCI_FLT_CLEAR_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /* Update page scan state (since we may have modified it when setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * the event filter).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) __hci_req_update_scan(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static void hci_req_set_event_filter(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct bdaddr_list_with_flags *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct hci_cp_set_event_filter f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) u8 scan = SCAN_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Always clear event filter when starting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) hci_req_clear_event_filter(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) list_for_each_entry(b, &hdev->whitelist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) b->current_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) memset(&f, 0, sizeof(f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) f.flt_type = HCI_FLT_CONN_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) scan = SCAN_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
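
/* The net effect for BR/EDR wake-up: connection-setup filters are only
 * installed for white-listed devices flagged with
 * HCI_CONN_FLAG_REMOTE_WAKEUP, and page scanning is left disabled unless
 * at least one such filter was added.
 */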
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void hci_req_config_le_suspend_scan(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* Before changing params disable scan if enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) hci_req_add_le_scan_disable(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Configure params and enable scanning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) hci_req_add_le_passive_scan(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* Block suspend notifier on response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void cancel_adv_timeout(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (hdev->adv_instance_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) hdev->adv_instance_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) cancel_delayed_work(&hdev->adv_instance_expire);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* This function requires the caller holds hdev->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static void hci_suspend_adv_instances(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) bt_dev_dbg(req->hdev, "Suspending advertising instances");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /* Call to disable any advertisements active on the controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * This will succeed even if no advertisements are configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) __hci_req_disable_advertising(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* If we are using software rotation, pause the loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!ext_adv_capable(req->hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) cancel_adv_timeout(req->hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* This function requires the caller holds hdev->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void hci_resume_adv_instances(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct adv_info *adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) bt_dev_dbg(req->hdev, "Resuming advertising instances");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (ext_adv_capable(req->hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* Call for each tracked instance to be re-enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) list_for_each_entry(adv, &req->hdev->adv_instances, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) __hci_req_enable_ext_advertising(req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) adv->instance);
		}
	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Schedule for most recent instance to be restarted and begin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * the software rotation loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) __hci_req_schedule_adv_instance(req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) req->hdev->cur_adv_instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) wake_up(&hdev->suspend_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
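
/* Suspend flow sketch: the PM notifier (which lives outside this file)
 * sets bits in hdev->suspend_tasks, invokes hci_req_prepare_suspend()
 * below and then waits on hdev->suspend_wait_q; this completion callback
 * clears the scan-related bits and wakes the waiter once the queued
 * commands have finished.
 */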
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Call with hci_dev_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) u8 page_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int disconnect_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (next == hdev->suspend_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) bt_dev_dbg(hdev, "Same state before and after: %d", next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) hdev->suspend_state = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (next == BT_SUSPEND_DISCONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Mark device as suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) hdev->suspended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* Pause discovery if not already stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) old_state = hdev->discovery.state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (old_state != DISCOVERY_STOPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) queue_work(hdev->req_workqueue, &hdev->discov_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) hdev->discovery_paused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) hdev->discovery_old_state = old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /* Stop directed advertising */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (old_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) cancel_delayed_work(&hdev->discov_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) queue_delayed_work(hdev->req_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) &hdev->discov_off, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* Pause other advertisements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (hdev->adv_instance_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) hci_suspend_adv_instances(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) hdev->advertising_paused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) hdev->advertising_old_state = old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /* Disable page scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) page_scan = SCAN_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Disable LE passive scan if enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) hci_req_add_le_scan_disable(&req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* Mark task needing completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* Prevent disconnects from causing scanning to be re-enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) hdev->scanning_paused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /* Run commands before disconnecting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) hci_req_run(&req, suspend_req_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) disconnect_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* Soft disconnect everything (power off) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) list_for_each_entry(conn, &hdev->conn_hash.list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) disconnect_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (disconnect_counter > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) bt_dev_dbg(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) "Had %d disconnects. Will wait on them",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) disconnect_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* Unpause to take care of updating scanning params */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) hdev->scanning_paused = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Enable event filter for paired devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) hci_req_set_event_filter(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /* Enable passive scan at lower duty cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) hci_req_config_le_suspend_scan(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* Pause scan changes again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) hdev->scanning_paused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) hci_req_run(&req, suspend_req_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) hdev->suspended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) hdev->scanning_paused = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) hci_req_clear_event_filter(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* Reset passive/background scanning to normal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) hci_req_config_le_suspend_scan(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* Unpause directed advertising */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) hdev->advertising_paused = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (hdev->advertising_old_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) set_bit(SUSPEND_UNPAUSE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) hci_dev_set_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) queue_work(hdev->req_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) &hdev->discoverable_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) hdev->advertising_old_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* Resume other advertisements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (hdev->adv_instance_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) hci_resume_adv_instances(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* Unpause discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) hdev->discovery_paused = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) hdev->discovery_old_state != DISCOVERY_STOPPING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) hci_discovery_set_state(hdev, DISCOVERY_STARTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) queue_work(hdev->req_workqueue, &hdev->discov_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) hci_req_run(&req, suspend_req_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) hdev->suspend_state = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) wake_up(&hdev->suspend_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
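
/* An illustrative suspend/resume sequence (a sketch; BT_RUNNING is the
 * resume state handled by the final else branch above):
 *
 *	hci_req_prepare_suspend(hdev, BT_SUSPEND_DISCONNECT);
 *	hci_req_prepare_suspend(hdev, BT_SUSPEND_CONFIGURE_WAKE);
 *	... system sleeps, then wakes ...
 *	hci_req_prepare_suspend(hdev, BT_RUNNING);
 */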
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) u8 instance = hdev->cur_adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
	/* Instance 0x00 always sets the local name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (instance == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* TODO: Take into account the "appearance" and "local-name" flags here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * These are currently being ignored as they are not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return adv_instance->scan_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) void __hci_req_disable_advertising(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) u8 enable = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (instance == 0x00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* Instance 0 always manages the "Tx Power" and "Flags"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * corresponds to the "connectable" instance flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) flags |= MGMT_ADV_FLAG_CONNECTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) flags |= MGMT_ADV_FLAG_DISCOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
	/* Return 0 when given an invalid instance identifier. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return adv_instance->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* If privacy is not enabled don't use RPA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /* If basic privacy mode is enabled use RPA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* If limited privacy mode is enabled don't use RPA if we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * both discoverable and bondable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if ((flags & MGMT_ADV_FLAG_DISCOV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) hci_dev_test_flag(hdev, HCI_BONDABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* We're neither bondable nor discoverable in the limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * privacy mode, therefore use RPA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
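/* Whether advertising may coexist with the current LE connections depends
 * on the state combinations the controller reports via the LE Read
 * Supported States command; the le_states bitmask consulted below is
 * populated from that response.
 */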
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* If there is no connection we are OK to advertise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (hci_conn_num(hdev, LE_LINK) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /* Check le_states if there is any connection in slave role. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non-connectable mode bit 20. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (!connectable && !(hdev->le_states[2] & 0x10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* Slave connection state and connectable mode bit 38
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * and scannable bit 21.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (connectable && (!(hdev->le_states[4] & 0x40) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) !(hdev->le_states[2] & 0x20)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Check le_states if there is any connection in master role. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non-connectable mode bit 18. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (!connectable && !(hdev->le_states[2] & 0x02))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (connectable && (!(hdev->le_states[4] & 0x08) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) !(hdev->le_states[2] & 0x08)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) void __hci_req_enable_advertising(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct hci_cp_le_set_adv_param cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) u8 own_addr_type, enable = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) bool connectable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) u16 adv_min_interval, adv_max_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* If the "connectable" instance flag was not set, then choose between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) mgmt_get_connectable(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (!is_advertising_allowed(hdev, connectable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) __hci_req_disable_advertising(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
	/* Clear the HCI_LE_ADV bit temporarily so that
	 * hci_update_random_address() knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) hci_dev_clear_flag(hdev, HCI_LE_ADV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /* Set require_privacy to true only when non-connectable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * advertising is used. In that case it is fine to use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * non-resolvable private address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (hci_update_random_address(req, !connectable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) adv_use_rpa(hdev, flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) &own_addr_type) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (connectable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) cp.type = LE_ADV_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) adv_min_interval = hdev->le_adv_min_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) adv_max_interval = hdev->le_adv_max_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (get_cur_adv_instance_scan_rsp_len(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) cp.type = LE_ADV_SCAN_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) cp.type = LE_ADV_NONCONN_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) adv_min_interval = hdev->le_adv_min_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) adv_max_interval = hdev->le_adv_max_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) cp.min_interval = cpu_to_le16(adv_min_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) cp.max_interval = cpu_to_le16(adv_max_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) cp.own_address_type = own_addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) cp.channel_map = hdev->le_adv_channel_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
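
/* Summary of the advertising type chosen above: connectable advertising
 * uses ADV_IND; non-connectable advertising uses ADV_SCAN_IND when scan
 * response data is present and ADV_NONCONN_IND otherwise.
 */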
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) size_t short_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) size_t complete_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
	/* no space left for name (+ NUL + type + len) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return ad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /* use complete name if present and fits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) complete_len = strlen(hdev->dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) hdev->dev_name, complete_len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /* use short name if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) short_len = strlen(hdev->short_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (short_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) hdev->short_name, short_len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (complete_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) sizeof(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return ad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
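
/* The name is emitted as a standard AD element: a length octet covering
 * the type and payload, the EIR type (EIR_NAME_COMPLETE or EIR_NAME_SHORT)
 * and then the payload. Note that the calls above pass strlen() + 1, so
 * the terminating NUL travels with the name bytes.
 */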
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) u8 scan_rsp_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
	if (hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return append_local_name(hdev, ptr, scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) u8 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) u32 instance_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) u8 scan_rsp_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) instance_flags = adv_instance->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) adv_instance->scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) scan_rsp_len += adv_instance->scan_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return scan_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) u8 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct hci_cp_le_set_ext_scan_rsp_data cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) len = create_instance_scan_rsp_data(hdev, instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) cp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) len = create_default_scan_rsp_data(hdev, cp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (hdev->scan_rsp_data_len == len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) !memcmp(cp.data, hdev->scan_rsp_data, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) hdev->scan_rsp_data_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) cp.handle = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) cp.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) struct hci_cp_le_set_scan_rsp_data cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) len = create_instance_scan_rsp_data(hdev, instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) cp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) len = create_default_scan_rsp_data(hdev, cp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (hdev->scan_rsp_data_len == len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) !memcmp(cp.data, hdev->scan_rsp_data, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) hdev->scan_rsp_data_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) cp.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
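/* Assemble the advertising data for an instance into ptr: the Flags
 * field (unless the instance already carries one), the instance's own
 * adv_data (when one exists) and, when flagged, the TX power field.
 * Returns the number of bytes written.
 */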
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) struct adv_info *adv_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) u8 ad_len = 0, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) u32 instance_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* Return 0 when the current instance identifier is invalid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) instance_flags = get_adv_instance_flags(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /* If the instance already has the flags set, skip adding them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (adv_instance && eir_get_data(adv_instance->adv_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) adv_instance->adv_data_len, EIR_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) goto skip_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) /* The Add Advertising command allows userspace to set both the general
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * and limited discoverable flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (instance_flags & MGMT_ADV_FLAG_DISCOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) flags |= LE_AD_GENERAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) flags |= LE_AD_LIMITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) flags |= LE_AD_NO_BREDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* If a discovery flag wasn't provided, simply use the global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * settings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (!flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) flags |= mgmt_get_adv_discov_flags(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /* If flags would still be empty, then there is no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) * include the "Flags" AD field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) ptr[0] = 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) ptr[1] = EIR_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) ptr[2] = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) ad_len += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) ptr += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) skip_flags:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (adv_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) memcpy(ptr, adv_instance->adv_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) adv_instance->adv_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ad_len += adv_instance->adv_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ptr += adv_instance->adv_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) s8 adv_tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) adv_tx_power = adv_instance->tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) adv_tx_power = hdev->adv_tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) adv_tx_power = hdev->adv_tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* Only include the TX power field when we have a valid value for it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (adv_tx_power != HCI_TX_POWER_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) ptr[0] = 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) ptr[1] = EIR_TX_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) ptr[2] = (u8)adv_tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ad_len += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) ptr += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return ad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
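/* Queue an HCI command updating the advertising data for the given
 * instance, choosing the extended or legacy command based on the
 * controller's capabilities. Skipped when LE is disabled or the data
 * is unchanged.
 */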
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) u8 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct hci_cp_le_set_ext_adv_data cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) len = create_instance_adv_data(hdev, instance, cp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /* There's nothing to do if the data hasn't changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (hdev->adv_data_len == len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) memcmp(cp.data, hdev->adv_data, len) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) hdev->adv_data_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) cp.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) cp.handle = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) struct hci_cp_le_set_adv_data cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) len = create_instance_adv_data(hdev, instance, cp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* There's nothing to do if the data hasn't changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (hdev->adv_data_len == len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) memcmp(cp.data, hdev->adv_data, len) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) hdev->adv_data_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) cp.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
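/* Convenience wrapper that builds a one-shot request, refreshes the
 * advertising data and runs it immediately, e.g.
 *
 *	hci_req_update_adv_data(hdev, 0x00);
 *
 * for callers that are not already inside a request context.
 */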
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) __hci_req_update_adv_data(&req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) return hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) BT_DBG("%s status %u", hdev->name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
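/* Queue and run a request disabling controller-based address
 * resolution. Does nothing unless LL Privacy is in use or RPA
 * resolution is currently enabled.
 */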
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) void hci_req_disable_address_resolution(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) __u8 enable = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (!use_ll_privacy(hdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) hci_req_run(&req, enable_addr_resolution_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) BT_DBG("%s status %u", hdev->name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
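/* Re-enable advertising after it was temporarily disabled, resuming
 * the current instance when one is set and otherwise falling back to
 * the default instance 0x00.
 */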
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) void hci_req_reenable_advertising(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) list_empty(&hdev->adv_instances))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (hdev->cur_adv_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) __hci_req_start_ext_adv(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) __hci_req_update_adv_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) __hci_req_update_scan_rsp_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) __hci_req_enable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) hci_req_run(&req, adv_enable_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
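/* Delayed-work handler run when the current advertising instance's
 * timeout expires: the instance is cleared, and advertising is
 * disabled if no instances remain.
 */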
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static void adv_timeout_expire(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) struct hci_dev *hdev = container_of(work, struct hci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) adv_instance_expire.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) u8 instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) BT_DBG("%s", hdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) hdev->adv_instance_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) instance = hdev->cur_adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (instance == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (list_empty(&hdev->adv_instances))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) __hci_req_disable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
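/* Pick the own-address type and, when needed, a fresh random address
 * for advertising:
 *  - use_rpa: generate or reuse a resolvable private address (RPA)
 *    and arm the RPA expiry timer;
 *  - require_privacy: fall back to a non-resolvable private address;
 *  - otherwise use the public address.
 * The result is returned through own_addr_type and rand_addr.
 */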
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) bool use_rpa, struct adv_info *adv_instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) u8 *own_addr_type, bdaddr_t *rand_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) bacpy(rand_addr, BDADDR_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /* If privacy is enabled, use a resolvable private address. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * the current RPA has expired, generate a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (use_rpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) int to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /* If the controller supports LL Privacy, use own address type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (use_ll_privacy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) *own_addr_type = ADDR_LE_DEV_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (adv_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (!adv_instance->rpa_expired &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) !bacmp(&adv_instance->random_addr, &hdev->rpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) adv_instance->rpa_expired = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) !bacmp(&hdev->random_addr, &hdev->rpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) bt_dev_err(hdev, "failed to generate new RPA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) bacpy(rand_addr, &hdev->rpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) queue_delayed_work(hdev->workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) &adv_instance->rpa_expired_cb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) queue_delayed_work(hdev->workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) &hdev->rpa_expired, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /* If privacy is required but no resolvable private address is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * available, use a non-resolvable private address. This is useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * for non-connectable advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (require_privacy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) bdaddr_t nrpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) /* The non-resolvable private address is generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * from six random bytes with the two most significant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * bits cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) get_random_bytes(&nrpa, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) nrpa.b[5] &= 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* The non-resolvable private address shall not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * equal to the public address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (bacmp(&hdev->bdaddr, &nrpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) *own_addr_type = ADDR_LE_DEV_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) bacpy(rand_addr, &nrpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /* No privacy so use a public address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) *own_addr_type = ADDR_LE_DEV_PUBLIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) void __hci_req_clear_ext_adv_sets(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
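/* Queue LE Set Extended Advertising Parameters for the instance,
 * deriving the event properties (connectable/scannable) and PHYs from
 * the instance flags, and queue a Set Advertising Set Random Address
 * command when a new random address needs to be programmed.
 */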
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct hci_cp_le_set_ext_adv_params cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) bool connectable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) bdaddr_t random_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) u8 own_addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) bool secondary_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (instance > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) adv_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) flags = get_adv_instance_flags(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /* If the "connectable" instance flag was not set, then choose between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) mgmt_get_connectable(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (!is_advertising_allowed(hdev, connectable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /* Set require_privacy to true only when non-connectable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * advertising is used. In that case it is fine to use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * non-resolvable private address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) err = hci_get_random_address(hdev, !connectable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) adv_use_rpa(hdev, flags), adv_instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) &own_addr_type, &random_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) /* In the extended adv parameters the interval is 3 octets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (connectable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (secondary_adv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (secondary_adv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (secondary_adv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) cp.own_addr_type = own_addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) cp.channel_map = hdev->le_adv_channel_map;
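/* 0x7F: host has no TX power preference, let the controller pick */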
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) cp.tx_power = 127;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) cp.handle = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (flags & MGMT_ADV_FLAG_SEC_2M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) cp.primary_phy = HCI_ADV_PHY_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) cp.secondary_phy = HCI_ADV_PHY_2M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) cp.primary_phy = HCI_ADV_PHY_CODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) cp.secondary_phy = HCI_ADV_PHY_CODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) /* In all other cases use 1M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) cp.primary_phy = HCI_ADV_PHY_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) cp.secondary_phy = HCI_ADV_PHY_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (own_addr_type == ADDR_LE_DEV_RANDOM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) bacmp(&random_addr, BDADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) struct hci_cp_le_set_adv_set_rand_addr cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* Check if the random address needs to be updated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (adv_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (!bacmp(&random_addr, &adv_instance->random_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (!bacmp(&random_addr, &hdev->random_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) cp.handle = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) bacpy(&cp.bdaddr, &random_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) hci_req_add(req, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
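/* Queue LE Set Extended Advertising Enable for a single advertising
 * set. For instances with a timeout the duration is handed to the
 * controller, which then handles the scheduling itself.
 */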
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) struct hci_cp_le_set_ext_adv_enable *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct hci_cp_ext_adv_set *adv_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (instance > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) adv_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) cp = (void *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) adv_set = (void *) cp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) memset(cp, 0, sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) cp->enable = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) cp->num_of_sets = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) memset(adv_set, 0, sizeof(*adv_set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) adv_set->handle = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) /* Set the duration per instance since the controller is responsible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) * for scheduling it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (adv_instance && adv_instance->timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) u16 duration = adv_instance->timeout * MSEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /* Time = N * 10 ms; e.g. a 30 s timeout gives N = 3000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) adv_set->duration = cpu_to_le16(duration / 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
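/* Queue LE Set Extended Advertising Enable with enable set to 0x00 to
 * disable one advertising set, or all of them when instance is 0x00.
 */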
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) struct hci_cp_le_set_ext_adv_enable *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) struct hci_cp_ext_adv_set *adv_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) u8 req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) /* If request specifies an instance that doesn't exist, fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (instance > 0 && !hci_find_adv_instance(hdev, instance))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) memset(data, 0, sizeof(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) cp = (void *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) adv_set = (void *)cp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /* Instance 0x00 indicates all advertising instances will be disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) cp->num_of_sets = !!instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) cp->enable = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) adv_set->handle = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
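/* Queue LE Remove Advertising Set for the given instance handle. */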
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /* If request specifies an instance that doesn't exist, fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (instance > 0 && !hci_find_adv_instance(hdev, instance))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
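/* (Re)start extended advertising for an instance: disable the set if
 * the controller already knows it, reprogram its parameters and scan
 * response data, then enable it again.
 */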
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) /* If the instance isn't pending, the controller already knows about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) * it, and it's safe to disable it before reconfiguring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (adv_instance && !adv_instance->pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) __hci_req_disable_ext_adv_instance(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) err = __hci_req_setup_ext_adv_instance(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) __hci_req_update_scan_rsp_data(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) __hci_req_enable_ext_advertising(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
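/* Make the given instance the currently advertised one and account
 * for its remaining lifetime. On legacy controllers the expiry is
 * driven by delayed work; extended controllers schedule the duration
 * themselves.
 */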
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) struct adv_info *adv_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) u16 timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) list_empty(&hdev->adv_instances))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (hdev->adv_instance_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /* A zero timeout means unlimited advertising. As long as there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) * only one instance, duration should be ignored. We still set a timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) * in case further instances are added later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * If the remaining lifetime of the instance is more than the duration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * then the timeout corresponds to the duration, otherwise it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * reduced to the remaining instance lifetime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (adv_instance->timeout == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) adv_instance->duration <= adv_instance->remaining_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) timeout = adv_instance->duration;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) timeout = adv_instance->remaining_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) /* The remaining time is being reduced unless the instance is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * advertised without time limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (adv_instance->timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) adv_instance->remaining_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) adv_instance->remaining_time - timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) /* Only use work for scheduling instances with legacy advertising */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (!ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) hdev->adv_instance_timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) queue_delayed_work(hdev->req_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) &hdev->adv_instance_expire,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) msecs_to_jiffies(timeout * 1000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) /* If we're just re-scheduling the same instance again then do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * execute any HCI commands. This happens when a single instance is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * being advertised.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (!force && hdev->cur_adv_instance == instance &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) hdev->cur_adv_instance = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) __hci_req_start_ext_adv(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) __hci_req_update_adv_data(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) __hci_req_update_scan_rsp_data(req, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) __hci_req_enable_advertising(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) /* For a single instance:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * - force == true: The instance will be removed even when its remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * lifetime is not zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * - force == false: The instance will be deactivated but kept stored unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * the remaining lifetime is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * For instance == 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) * - force == true: All instances will be removed regardless of their timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) * setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * - force == false: Only instances that have a timeout will be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct hci_request *req, u8 instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) struct adv_info *adv_instance, *n, *next_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) u8 rem_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /* Cancel any timeout concerning the removed instance(s). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (!instance || hdev->cur_adv_instance == instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) cancel_adv_timeout(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) /* Get the next instance to advertise BEFORE we remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) * the current one. This can be the same instance again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * if there is only one instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (instance && hdev->cur_adv_instance == instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) next_instance = hci_get_next_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (instance == 0x00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (!(force || adv_instance->timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) rem_inst = adv_instance->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) err = hci_remove_adv_instance(hdev, rem_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) mgmt_advertising_removed(sk, hdev, rem_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) adv_instance = hci_find_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (force || (adv_instance && adv_instance->timeout &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) !adv_instance->remaining_time)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) /* Don't advertise a removed instance. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (next_instance &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) next_instance->instance == instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) next_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) err = hci_remove_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) mgmt_advertising_removed(sk, hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (!req || !hdev_is_powered(hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) hci_dev_test_flag(hdev, HCI_ADVERTISING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (next_instance && !ext_adv_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) __hci_req_schedule_adv_instance(req, next_instance->instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
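/* Queue LE Set Random Address unless advertising or an LE connection
 * attempt is in progress, in which case the update is deferred by
 * flagging the RPA as expired.
 */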
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /* If we're advertising or initiating an LE connection we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * go ahead and change the random address at this time. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * because the eventual initiator address used for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * subsequently created connection will be undefined (some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * controllers use the new address and others the one we had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * when the operation started).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) * In this kind of scenario skip the update and let the random
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) * address be updated at the next cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) hci_lookup_le_connect(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) BT_DBG("Deferring random address update");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
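/* Like hci_get_random_address(), but for requests that program
 * hdev->random_addr directly: selects the own-address type and queues
 * the address update commands as needed.
 */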
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) int hci_update_random_address(struct hci_request *req, bool require_privacy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) bool use_rpa, u8 *own_addr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) /* If privacy is enabled, use a resolvable private address. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * current RPA has expired, or something other than the current RPA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * is in use, generate a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (use_rpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) int to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /* If the controller supports LL Privacy, use own address type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (use_ll_privacy(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) *own_addr_type = ADDR_LE_DEV_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) !bacmp(&hdev->random_addr, &hdev->rpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) bt_dev_err(hdev, "failed to generate new RPA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) set_random_addr(req, &hdev->rpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* If privacy is required but no resolvable private address is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * available, use a non-resolvable private address. This is useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * for active scanning and non-connectable advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (require_privacy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) bdaddr_t nrpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) /* The non-resolvable private address is generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) * from six random bytes with the two most significant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * bits cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) get_random_bytes(&nrpa, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) nrpa.b[5] &= 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) /* The non-resolvable private address shall not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) * equal to the public address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (bacmp(&hdev->bdaddr, &nrpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) *own_addr_type = ADDR_LE_DEV_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) set_random_addr(req, &nrpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) /* If forcing static address is in use or there is no public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * address use the static address as random address (but skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * the HCI command if the current random address is already the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) * static one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) * In case BR/EDR has been disabled on a dual-mode controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) * and a static address has been configured, then use that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * address instead of the public BR/EDR address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) bacmp(&hdev->static_addr, BDADDR_ANY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) *own_addr_type = ADDR_LE_DEV_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (bacmp(&hdev->static_addr, &hdev->random_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) &hdev->static_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /* Neither privacy nor static address is being used so use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * public address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) *own_addr_type = ADDR_LE_DEV_PUBLIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
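/* A typical caller pattern (illustrative sketch; see active_scan() below
 * for a real in-tree example): request a private address and fall back to
 * the public address if that fails.
 *
 *	u8 own_addr_type;
 *
 *	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
 *					&own_addr_type);
 *	if (err < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 */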

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hdev->scanning_paused)
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
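/* Worked example (assuming the usual HCI Write Scan Enable encoding,
 * SCAN_INQUIRY = 0x01 and SCAN_PAGE = 0x02): on a connectable and
 * discoverable device the parameter built above is
 * SCAN_PAGE | SCAN_INQUIRY = 0x03, i.e. both page scan and inquiry scan
 * enabled, while SCAN_DISABLED (0x00) turns both off.
 */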

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

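/* Explanatory note on the Class of Device layout used below (summarizing
 * the Bluetooth assigned-numbers definition, not derived from this file):
 * CoD is a 3-byte little-endian field where cod[0] carries the minor
 * class, cod[1] the major class, and cod[2] the service classes OR'ed
 * together from each registered UUID's svc_hint above. The 0x20 set in
 * cod[1] corresponds to CoD bit 13, the Limited Discoverable Mode
 * service class bit.
 */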
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

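/* Inquiry Access Codes are 3-byte LAPs transmitted little-endian: the
 * byte sequences below encode the standard Limited IAC 0x9E8B00 and
 * General IAC 0x9E8B33 (noted here for clarity; these are the Bluetooth
 * assigned values).
 */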
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
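		/* Two cases below cannot be canceled over HCI: an LE link
		 * still in the scanning phase (no LE Create Connection has
		 * been issued yet) and an ACL link on a pre-1.2 controller
		 * (Create Connection Cancel was only added in Bluetooth
		 * 1.2), so those simply break out without queuing a
		 * command.
		 */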
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F), which isn't
			 * compatible with most values passed to this
			 * function. To be safe, hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}
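/* Illustrative usage sketch: abort an established or pending connection
 * with a standard HCI error code, e.g. when the peer should see a normal
 * user-initiated termination:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * -ENODATA from hci_req_run() merely means the request built no commands
 * (nothing to abort), which is why it is not treated as a failure above.
 */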

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req, false);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
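/* Illustrative sketch: bredr_inquiry() is run through the synchronous
 * request machinery with the inquiry length carried in the opaque
 * parameter, e.g.:
 *
 *	hci_req_sync(hdev, bredr_inquiry, DISCOV_BREDR_INQUIRY_LEN,
 *		     HCI_CMD_TIMEOUT, &status);
 *
 * exactly as the discovery paths later in this file do.
 */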

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry has already finished, stop discovery;
	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
	 * If we are about to resolve a remote device name, do not change
	 * the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If the controller is not scanning, we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_req_add_le_scan_disable(req, false);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run 'duration' after scan_start. That work was canceled during
	 * the scan restart, so we need to queue it again with the
	 * remaining timeout, to make sure the scan does not run
	 * indefinitely.
	 */
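	/* Worked example (hypothetical numbers): with a scan_duration of
	 * 10 s and 4 s already elapsed since scan_start, the disable work
	 * is re-queued to fire in roughly 6 s; once the full duration has
	 * elapsed it fires immediately (timeout = 0). The else branch of
	 * the elapsed computation below only guards against jiffies
	 * wraparound.
	 */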
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* White list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	BT_DBG("%s", hdev->name);

	/* If the controller is scanning, it means background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, addr_resolv);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
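/* Flow summary (for orientation, derived from the code above): discovery
 * starts here, a delayed le_scan_disable work is armed with the chosen
 * timeout, and when that work fires le_scan_disable_work() stops LE
 * scanning and either finishes discovery or kicks off the BR/EDR inquiry
 * phase of interleaved discovery on controllers without the
 * simultaneous-discovery quirk.
 */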

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;
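		/* Simultaneous LE and BR/EDR to the same device was
		 * deprecated by the core specification, which is
		 * presumably why simul is always left disabled here.
		 */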

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

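	/* Sync the authentication setting with the controller, but only
	 * when it actually differs from the current HCI_AUTH state, so
	 * that no redundant command is queued.
	 */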
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

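/* Entry point for the power-on synchronization above; presumably
 * invoked from the core power-on path once the controller has been
 * brought up under mgmt control. Runs powered_update_hci as a
 * synchronous request.
 */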
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can reliably determine
	 * whether the public address or the static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

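/* Wire up the request-related work items; expected to run once per
 * hci_dev, at allocation time, before any of the handlers can be
 * scheduled.
 */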
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

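/* Counterpart to hci_request_setup(): aborts any pending synchronous
 * request and flushes every work item initialized above, presumably on
 * the power-down/unregister path where none of them may run again.
 */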
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

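	/* hdev->adv_instance_timeout doubles as the "timer armed"
	 * indicator, so only cancel the expiry work and reset it when a
	 * timeout is actually pending.
	 */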
	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}