/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#define hci_req_sync_lock(hdev)   mutex_lock(&hdev->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)

struct hci_request {
	struct hci_dev *hdev;

	/* HCI commands queued by hci_req_add() until the request is run */
	struct sk_buff_head cmd_q;

	/* If something goes wrong when building the HCI request, the error
	 * value is stored in this field.
	 */
	int err;
};

void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
void hci_req_purge(struct hci_request *req);
bool hci_req_status_pend(struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb);
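
/* Illustrative (non-authoritative) sketch of the asynchronous flow, assuming
 * a powered-up hdev; the function and callback names below are hypothetical:
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x", opcode, status);
 *	}
 *
 *	static int example_read_local_name(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return hci_req_run(&req, example_complete);
 *	}
 */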

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status);
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status);
void hci_req_sync_cancel(struct hci_dev *hdev, int err);
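
/* Minimal sketch of the synchronous variant, assuming the caller may sleep.
 * The builder callback only queues commands; hci_req_sync() runs them and
 * waits for completion (the names below are hypothetical):
 *
 *	static int example_build(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, example_build, 0, HCI_CMD_TIMEOUT,
 *			       &status);
 */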

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param);

int __hci_req_hci_power_on(struct hci_dev *hdev);

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
void __hci_req_update_name(struct hci_request *req);
void __hci_req_update_eir(struct hci_request *req);

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
void hci_req_add_le_passive_scan(struct hci_request *req);

void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);

void hci_req_disable_address_resolution(struct hci_dev *hdev);
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force);
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force);

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance);
void __hci_req_clear_ext_adv_sets(struct hci_request *req);
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr);

void __hci_req_update_class(struct hci_request *req);

/* Returns true if HCI commands were queued */
bool hci_req_stop_discovery(struct hci_request *req);

/* Defer a BR/EDR scan-setting (page/inquiry scan) update to the request
 * workqueue.
 */
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->scan_update);
}

void __hci_req_update_scan(struct hci_request *req);

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type);

int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason);

/* Defer an LE background (passive) scan re-evaluation to the request
 * workqueue.
 */
static inline void hci_update_background_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
}

void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);

/* Append an EIR/AD field: length octet (type + data), type octet, then the
 * data itself. The caller must ensure data_len + 2 more bytes fit in eir.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
				  u8 *data, u8 data_len)
{
	/* The length octet covers the type octet plus the data that follows */
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

/* Append a 2-octet EIR/AD field, storing the value in little-endian order */
static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
{
	eir[eir_len++] = sizeof(type) + sizeof(data);
	eir[eir_len++] = type;
	put_unaligned_le16(data, &eir[eir_len]);
	eir_len += sizeof(data);

	return eir_len;
}
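
/* Minimal usage sketch for the helpers above, assuming buf is at least
 * HCI_MAX_AD_LENGTH bytes, name is a NUL-terminated local name, appearance
 * is a hypothetical 16-bit value and EIR_APPEARANCE is defined in hci.h:
 *
 *	u8 buf[HCI_MAX_AD_LENGTH];
 *	u16 len = 0;
 *
 *	len = eir_append_data(buf, len, EIR_NAME_COMPLETE,
 *			      (u8 *)name, strlen(name));
 *	len = eir_append_le16(buf, len, EIR_APPEARANCE, appearance);
 */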