/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	18

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

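/* Map an HCI status code to its MGMT equivalent, falling back to
 * MGMT_STATUS_FAILED for codes outside the conversion table.
 */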
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

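/* Helpers for broadcasting MGMT events on the HCI control channel:
 * mgmt_index_event() filters receivers by the given socket flag,
 * mgmt_limited_event() does the same but also skips one socket, and
 * mgmt_event() always targets trusted sockets while skipping one socket.
 */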
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

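/* Translate an MGMT LE address type into the HCI core's ADDR_LE_DEV_*
 * representation.
 */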
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

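/* Fill in the MGMT interface version and revision for a Read Version
 * response.
 */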
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

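/* Read Management Version Information command handler */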
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

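/* Read Management Supported Commands command handler: trusted sockets get
 * the full command and event lists, untrusted sockets only the read-only
 * subset.
 */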
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

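/* Read Controller Index List command handler: reports configured primary
 * controllers, skipping devices still in setup/config or bound to a user
 * channel.
 */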
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

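/* Read Unconfigured Controller Index List command handler: same filtering
 * as read_index_list(), but reports unconfigured controllers instead.
 */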
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

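/* Read Extended Controller Index List command handler: reports primary and
 * AMP controllers along with their configuration state and bus type.
 */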
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

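/* A controller counts as configured once any required external config has
 * completed and a valid public address is set (when the quirks demand one).
 */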
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

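/* Return the configuration options that still need to be provided before
 * the controller can be considered configured, as a little-endian bitmask.
 */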
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

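/* Send a New Configuration Options event to sockets that listen for option
 * events, optionally skipping the socket that triggered the change.
 */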
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

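/* Reply to a configuration command with the currently missing options */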
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

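/* Read Controller Configuration Information command handler */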
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

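/* Build the bitmask of PHYs the controller supports, based on its LMP and
 * LE feature bits.
 */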
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

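/* Build the bitmask of PHYs currently selected, derived from the BR/EDR
 * packet type mask and the default LE TX/RX PHY preferences.
 */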
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

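/* All supported PHYs except the mandatory BR 1M 1-slot and LE 1M ones can
 * be reconfigured.
 */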
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

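/* Build the bitmask of MGMT settings this controller can support */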
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* When the experimental feature for LL Privacy support is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * enabled, advertising is no longer supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) settings |= MGMT_SETTING_ADVERTISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) hdev->set_bdaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) settings |= MGMT_SETTING_CONFIGURATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) settings |= MGMT_SETTING_PHY_CONFIGURATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return settings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
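/* Build the bitmask of settings that are currently enabled, derived
 * from the runtime hdev flags.
 */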
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static u32 get_current_settings(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u32 settings = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) settings |= MGMT_SETTING_POWERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) settings |= MGMT_SETTING_CONNECTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) settings |= MGMT_SETTING_FAST_CONNECTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) settings |= MGMT_SETTING_DISCOVERABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (hci_dev_test_flag(hdev, HCI_BONDABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) settings |= MGMT_SETTING_BONDABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) settings |= MGMT_SETTING_BREDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) settings |= MGMT_SETTING_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) settings |= MGMT_SETTING_LINK_SECURITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) settings |= MGMT_SETTING_SSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) settings |= MGMT_SETTING_HS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) settings |= MGMT_SETTING_ADVERTISING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) settings |= MGMT_SETTING_SECURE_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) settings |= MGMT_SETTING_DEBUG_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (hci_dev_test_flag(hdev, HCI_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) settings |= MGMT_SETTING_PRIVACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* The current setting for static address has two purposes. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * first is to indicate if the static address will be used and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * the second is to indicate if it is actually set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * This means if the static address is not configured, this flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * will never be set. If the address is configured, then whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * it is actually in use decides if the flag is set or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * For single mode LE only controllers and dual-mode controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * with BR/EDR disabled, the existence of the static address will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * be evaluated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (bacmp(&hdev->static_addr, BDADDR_ANY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) settings |= MGMT_SETTING_STATIC_ADDRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) settings |= MGMT_SETTING_WIDEBAND_SPEECH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return settings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
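/* Helpers for looking up pending management commands on the control
 * channel.
 */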
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
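/* Return the discoverability bits (general or limited) for the
 * advertising Flags AD field, taking a pending Set Discoverable
 * command into account.
 */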
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* If there's a pending mgmt command the flags will not yet have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * their final values, so check for this first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct mgmt_mode *cp = cmd->param;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (cp->val == 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return LE_AD_GENERAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) else if (cp->val == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return LE_AD_LIMITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return LE_AD_LIMITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return LE_AD_GENERAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) bool mgmt_get_connectable(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* If there's a pending mgmt command the flag will not yet have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * its final value, so check for this first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct mgmt_mode *cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return cp->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
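/* Delayed work run when the service cache times out; it refreshes the
 * EIR data and the class of device.
 */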
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static void service_cache_off(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct hci_dev *hdev = container_of(work, struct hci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) service_cache.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) __hci_req_update_class(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
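/* Delayed work run when the RPA timeout expires; it marks the RPA as
 * expired and, if advertising is enabled, restarts advertising so that
 * a fresh RPA gets used.
 */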
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static void rpa_expired(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct hci_dev *hdev = container_of(work, struct hci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) rpa_expired.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) bt_dev_dbg(hdev, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Generating a new RPA and programming it into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * controller both happen in the hci_req_enable_advertising()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (ext_adv_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __hci_req_enable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* Non-mgmt controlled devices get this bit set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * implicitly so that pairing works for them; however,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * for mgmt we require user-space to explicitly enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) hci_dev_clear_flag(hdev, HCI_BONDABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct mgmt_rp_read_info rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) bacpy(&rp.bdaddr, &hdev->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) rp.version = hdev->hci_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) rp.manufacturer = cpu_to_le16(hdev->manufacturer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rp.current_settings = cpu_to_le32(get_current_settings(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) memcpy(rp.dev_class, hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
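/* Append class of device, appearance and the local names to the EIR
 * buffer and return the total number of bytes written.
 */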
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) u16 eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) size_t name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) hdev->appearance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) name_len = strlen(hdev->dev_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) hdev->dev_name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) name_len = strlen(hdev->short_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) hdev->short_name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) char buf[512];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct mgmt_rp_read_ext_info *rp = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) u16 eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) memset(buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) bacpy(&rp->bdaddr, &hdev->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) rp->version = hdev->hci_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rp->manufacturer = cpu_to_le16(hdev->manufacturer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rp->current_settings = cpu_to_le32(get_current_settings(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) eir_len = append_eir_data_to_buf(hdev, rp->eir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) rp->eir_len = cpu_to_le16(eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* If this command is called at least once, then the events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * for class of device and local name changes are disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * and only the new extended controller information event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) sizeof(*rp) + eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
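/* Send the Extended Controller Information Changed event to all
 * sockets that have opted in to it, except the one given in skip.
 */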
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) char buf[512];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct mgmt_ev_ext_info_changed *ev = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) u16 eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) memset(buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) eir_len = append_eir_data_to_buf(hdev, ev->eir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) ev->eir_len = cpu_to_le16(eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) sizeof(*ev) + eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) HCI_MGMT_EXT_INFO_EVENTS, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
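/* Complete a management command with the current settings as the
 * response.
 */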
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) __le32 settings = cpu_to_le32(get_current_settings(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) sizeof(settings));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (hci_conn_count(hdev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) cancel_delayed_work(&hdev->power_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) queue_work(hdev->req_workqueue, &hdev->power_off.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct mgmt_ev_advertising_added ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ev.instance = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) u8 instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct mgmt_ev_advertising_removed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ev.instance = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static void cancel_adv_timeout(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (hdev->adv_instance_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) hdev->adv_instance_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) cancel_delayed_work(&hdev->adv_instance_expire);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
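/* Queue the HCI commands needed before powering down: disable page and
 * inquiry scan, remove advertising, stop discovery and abort all
 * existing connections.
 */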
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int clean_up_hci_state(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) bool discov_stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (test_bit(HCI_ISCAN, &hdev->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) test_bit(HCI_PSCAN, &hdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) u8 scan = 0x00;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) __hci_req_disable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) discov_stopped = hci_req_stop_discovery(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) list_for_each_entry(conn, &hdev->conn_hash.list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* 0x15 == Terminated due to Power Off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) __hci_abort_conn(&req, conn, 0x15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) err = hci_req_run(&req, clean_up_hci_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!err && discov_stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
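/* Handler for the Set Powered command. Powering on is deferred to the
 * power_on work, while powering off first cleans up the HCI state.
 */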
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!!cp->val == hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) queue_work(hdev->req_workqueue, &hdev->power_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* Disconnect connections, stop scans, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) err = clean_up_hci_state(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) HCI_POWER_OFF_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* ENODATA means there were no HCI commands queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (err == -ENODATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) cancel_delayed_work(&hdev->power_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) queue_work(hdev->req_workqueue, &hdev->power_off.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
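/* Send the New Settings event to every socket listening for setting
 * events, except the one given in skip.
 */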
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int new_settings(struct hci_dev *hdev, struct sock *skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) __le32 ev = cpu_to_le32(get_current_settings(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) int mgmt_new_settings(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return new_settings(hdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct cmd_lookup {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) u8 mgmt_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct cmd_lookup *match = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) list_del(&cmd->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (match->sk == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) match->sk = cmd->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) sock_hold(match->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mgmt_pending_free(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) u8 *status = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (cmd->cmd_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) u8 *status = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) cmd->cmd_complete(cmd, *status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) cmd_status_rsp(cmd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) cmd->param, cmd->param_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) cmd->param, sizeof(struct mgmt_addr_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static u8 mgmt_bredr_support(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (!lmp_bredr_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return MGMT_STATUS_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return MGMT_STATUS_REJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static u8 mgmt_le_support(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return MGMT_STATUS_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return MGMT_STATUS_REJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
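/* Called when the HCI request triggered by Set Discoverable has
 * completed. On success the discoverable timeout gets armed and the
 * new settings are reported back to user-space.
 */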
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto remove_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) hdev->discov_timeout > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) new_settings(hdev, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) remove_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
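/* Handler for the Set Discoverable command. Validates the requested
 * mode and timeout, updates the discoverable flags and queues the
 * discoverable_update work when HCI commands are needed.
 */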
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct mgmt_cp_set_discoverable *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) u16 timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) timeout = __le16_to_cpu(cp->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* Disabling discoverable requires that no timeout is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * and enabling limited discoverable requires a timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if ((cp->val == 0x00 && timeout > 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) (cp->val == 0x02 && timeout == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (!hdev_is_powered(hdev) && timeout > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) MGMT_STATUS_NOT_POWERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (hdev->advertising_paused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* Setting limited discoverable when powered off is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * not a valid operation since it requires a timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * so there is no need to check HCI_LIMITED_DISCOVERABLE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* If the current mode is the same, then just update the timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * value with the new value. If only the timeout gets updated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * no HCI transactions are needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) (cp->val == 0x02) == hci_dev_test_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) HCI_LIMITED_DISCOVERABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) cancel_delayed_work(&hdev->discov_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) hdev->discov_timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (cp->val && hdev->discov_timeout > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) queue_delayed_work(hdev->req_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) &hdev->discov_off, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Cancel any potential discoverable timeout that might still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * be active and store the new timeout value. The arming of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * the timeout happens in the complete handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) cancel_delayed_work(&hdev->discov_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) hdev->discov_timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (cp->val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /* Limited discoverable mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (cp->val == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) queue_work(hdev->req_workqueue, &hdev->discoverable_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto remove_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) new_settings(hdev, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) remove_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
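/* Apply a Set Connectable request while the controller is not powered,
 * where updating the flags and notifying user-space is sufficient.
 */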
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static int set_connectable_update_settings(struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct sock *sk, u8 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) hci_dev_set_flag(hdev, HCI_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) hci_req_update_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) hci_update_background_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) err = set_connectable_update_settings(hdev, sk, cp->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) hci_dev_set_flag(hdev, HCI_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (hdev->discov_timeout > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) cancel_delayed_work(&hdev->discov_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) queue_work(hdev->req_workqueue, &hdev->connectable_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
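/* Handler for MGMT_OP_SET_BONDABLE. This is a purely host-side
 * setting, so only the HCI_BONDABLE flag is toggled and no HCI
 * command is sent to the controller.
 */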
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (cp->val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /* In limited privacy mode the change of bondable mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * may affect the local advertising address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (hdev_is_powered(hdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) queue_work(hdev->req_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) &hdev->discoverable_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
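/* Handler for MGMT_OP_SET_LINK_SECURITY. When powered, this maps to
 * the HCI Write Authentication Enable command; when powered off only
 * the HCI_LINK_SECURITY flag is updated.
 */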
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) u8 val, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) status = mgmt_bredr_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) val = !!cp->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (test_bit(HCI_AUTH, &hdev->flags) == val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
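/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). When powered
 * off only the HCI_SSP_ENABLED flag (and, when disabling, the
 * HCI_HS_ENABLED flag) is updated; when powered, a pending command is
 * queued and HCI_OP_WRITE_SSP_MODE is sent to the controller.
 */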
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) status = mgmt_bredr_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (!lmp_ssp_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) changed = !hci_dev_test_and_set_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) HCI_SSP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) HCI_SSP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (!changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (pending_find(MGMT_OP_SET_SSP, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) sizeof(cp->val), &cp->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
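/* Handler for MGMT_OP_SET_HS (High Speed/AMP support). This only
 * toggles the HCI_HS_ENABLED flag on the host side; disabling it is
 * rejected while the controller is powered.
 */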
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (!IS_ENABLED(CONFIG_BT_HS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) status = mgmt_bredr_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (!lmp_ssp_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (pending_find(MGMT_OP_SET_SSP, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
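/* Completion callback for the HCI request issued by set_le(). On
 * failure all pending SET_LE commands are failed with the translated
 * status; on success the new settings are broadcast and, if LE ended
 * up enabled, default advertising and scan response data are set up.
 */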
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct cmd_lookup match = { NULL, hdev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) &mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) new_settings(hdev, match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /* Make sure the controller has a good default for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * advertising data. Restrict the update to when LE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * has actually been enabled. During power on, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * update in powered_update_hci will take care of it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) err = __hci_req_setup_ext_adv_instance(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) __hci_req_update_scan_rsp_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) __hci_req_update_adv_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) __hci_req_update_scan_rsp_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) hci_update_background_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
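/* Handler for MGMT_OP_SET_LE. Disabling LE is refused on LE-only
 * controllers. If the controller is powered off, or host LE support
 * already matches the requested value, only the flags are updated;
 * otherwise HCI_OP_WRITE_LE_HOST_SUPPORTED is sent, after any active
 * advertising has been stopped when LE is being disabled.
 */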
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct hci_cp_write_le_host_supported hci_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) u8 val, enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
/* Single-mode LE-only controllers, or dual-mode controllers
 * configured as LE-only devices, do not allow switching LE off.
 * These either have LE enabled explicitly or have had BR/EDR
 * switched off previously.
 *
 * When trying to enable LE while it is already enabled, gracefully
 * send a positive response. Trying to disable it, however, results
 * in a rejection.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (cp->val == 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) val = !!cp->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) enabled = lmp_host_le_capable(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (!hdev_is_powered(hdev) || val == enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) hci_dev_change_flag(hdev, HCI_LE_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) hci_dev_clear_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (pending_find(MGMT_OP_SET_LE, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) memset(&hci_cp, 0, sizeof(hci_cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) hci_cp.le = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) hci_cp.simul = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) __hci_req_disable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (ext_adv_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) __hci_req_clear_ext_adv_sets(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) &hci_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) err = hci_req_run(&req, le_enable_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
/* This is a helper function to test for pending mgmt commands that can
 * result in Class of Device or EIR HCI commands. Only one such pending
 * mgmt command is allowed at a time, since otherwise we cannot easily
 * track what the current and future values are and, based on that,
 * whether a new HCI command needs to be sent and with what value.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static bool pending_eir_or_class(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) switch (cmd->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) case MGMT_OP_ADD_UUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) case MGMT_OP_REMOVE_UUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) case MGMT_OP_SET_DEV_CLASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) case MGMT_OP_SET_POWERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static const u8 bluetooth_base_uuid[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
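/* Determine the shortest representation of a 128-bit UUID: 128 bits
 * if it is not based on the Bluetooth Base UUID, otherwise 32 or 16
 * bits depending on whether the value part fits into 16 bits. The
 * UUID is stored in little-endian byte order, so the base is matched
 * against the first 12 bytes and the value sits in the last 4.
 */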
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static u8 get_uuid_size(const u8 *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (memcmp(uuid, bluetooth_base_uuid, 12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) val = get_unaligned_le32(&uuid[12]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (val > 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
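/* Complete a pending UUID/class related mgmt command with the current
 * Class of Device as the response parameter.
 */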
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) cmd = pending_find(mgmt_op, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) mgmt_status(status), hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
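/* Handler for MGMT_OP_ADD_UUID. The UUID is appended to hdev->uuids
 * and a request is run to refresh the Class of Device and EIR data.
 * If the request has nothing to send (-ENODATA), the command is
 * completed immediately with the current device class.
 */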
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) struct mgmt_cp_add_uuid *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct bt_uuid *uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (pending_eir_or_class(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (!uuid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) memcpy(uuid->uuid, cp->uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) uuid->svc_hint = cp->svc_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) uuid->size = get_uuid_size(cp->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) list_add_tail(&uuid->list, &hdev->uuids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) __hci_req_update_class(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) err = hci_req_run(&req, add_uuid_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (err != -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
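/* Queue the delayed service_cache work if the controller is powered
 * and the HCI_SERVICE_CACHE flag was not already set. Returns true if
 * the work was queued, in which case the caller can defer the Class
 * of Device and EIR update to that work.
 */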
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) static bool enable_service_cache(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (!hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) queue_delayed_work(hdev->workqueue, &hdev->service_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) CACHE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
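/* Handler for MGMT_OP_REMOVE_UUID. A UUID of all zeroes clears the
 * whole list (possibly deferring the class/EIR update via the service
 * cache); otherwise every matching entry is removed, and an unknown
 * UUID is treated as an invalid parameter.
 */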
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) struct mgmt_cp_remove_uuid *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct bt_uuid *match, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) int err, found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (pending_eir_or_class(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) hci_uuids_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (enable_service_cache(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) MGMT_OP_REMOVE_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 0, hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) goto update_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (memcmp(match->uuid, cp->uuid, 16) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) list_del(&match->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) kfree(match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (found == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) update_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) __hci_req_update_class(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) err = hci_req_run(&req, remove_uuid_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (err != -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
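/* Handler for MGMT_OP_SET_DEV_CLASS. Values with the lower two bits
 * of the minor class or the upper three bits of the major class set
 * are rejected. When powered off the command completes with the
 * stored values; otherwise an HCI request is run to write the new
 * Class of Device.
 */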
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) struct mgmt_cp_set_dev_class *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (!lmp_bredr_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (pending_eir_or_class(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) hdev->major_class = cp->major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) hdev->minor_class = cp->minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) cancel_delayed_work_sync(&hdev->service_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) __hci_req_update_class(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) err = hci_req_run(&req, set_class_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (err != -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
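/* Handler for MGMT_OP_LOAD_LINK_KEYS. The supplied list replaces all
 * stored BR/EDR link keys. The command length is validated against
 * key_count, every key must use a BR/EDR address and a key type no
 * greater than 0x08, and debug combination keys as well as blocked
 * keys are skipped rather than stored.
 */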
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) struct mgmt_cp_load_link_keys *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) sizeof(struct mgmt_link_key_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) u16 key_count, expected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (!lmp_bredr_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) key_count = __le16_to_cpu(cp->key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (key_count > max_key_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) expected_len = struct_size(cp, keys, key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (expected_len != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) expected_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) for (i = 0; i < key_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) struct mgmt_link_key_info *key = &cp->keys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) MGMT_OP_LOAD_LINK_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) hci_link_keys_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (cp->debug_keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) HCI_KEEP_DEBUG_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) new_settings(hdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) for (i = 0; i < key_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) struct mgmt_link_key_info *key = &cp->keys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (hci_is_blocked_key(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) HCI_BLOCKED_KEY_TYPE_LINKKEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) key->val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) &key->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) /* Always ignore debug keys and require a new pairing if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) * the user wants to use them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (key->type == HCI_LK_DEBUG_COMBINATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) key->type, key->pin_len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
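/* Notify all management sockets except skip_sk that a device has
 * been unpaired.
 */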
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) u8 addr_type, struct sock *skip_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct mgmt_ev_device_unpaired ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) ev.addr.type = addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) skip_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
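/* Handler for the Unpair Device command. Removes the stored link key
 * (BR/EDR) or SMP keys (LE) for the given address and, if requested,
 * also terminates an existing connection to the device.
 */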
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) struct mgmt_cp_unpair_device *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) struct mgmt_rp_unpair_device rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) struct hci_conn_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) u8 addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) rp.addr.type = cp->addr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) if (!bdaddr_type_is_valid(cp->addr.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) MGMT_STATUS_NOT_POWERED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) /* If disconnection is requested, then look up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * connection. If the remote device is connected, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * will be later used to terminate the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * Setting it to NULL explicitly will cause no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) * termination of the link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) if (cp->disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) conn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) MGMT_OP_UNPAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) MGMT_STATUS_NOT_PAIRED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /* LE address type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) addr_type = le_addr_type(cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) MGMT_STATUS_NOT_PAIRED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (!conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /* Defer clearing up the connection parameters until closing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * give a chance of keeping them if a repairing happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) /* Disable auto-connection parameters if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) if (params) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (params->explicit_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) params->auto_connect = HCI_AUTO_CONN_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) /* If disconnection is not requested, then clear the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * variable so that the link is not terminated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (!cp->disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) conn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) /* If the connection variable is set, then termination of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * link is requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (!conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) cmd->cmd_complete = addr_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct mgmt_cp_disconnect *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct mgmt_rp_disconnect rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) rp.addr.type = cp->addr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (!bdaddr_type_is_valid(cp->addr.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (!test_bit(HCI_UP, &hdev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) MGMT_STATUS_NOT_POWERED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) MGMT_STATUS_BUSY, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (cp->addr.type == BDADDR_BREDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) le_addr_type(cp->addr.type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) MGMT_STATUS_NOT_CONNECTED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) cmd->cmd_complete = generic_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
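/* Convert an HCI link type and address type into the corresponding
 * management layer BDADDR_* address type.
 */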
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) switch (link_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) case LE_LINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) switch (addr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) case ADDR_LE_DEV_PUBLIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return BDADDR_LE_PUBLIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* Fallback to LE Random address type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) return BDADDR_LE_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) /* Fallback to BR/EDR type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) return BDADDR_BREDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
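/* Handler for the Get Connections command. Returns the list of devices
 * that are currently connected from the management interface point of
 * view; SCO and eSCO links are not included.
 */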
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) struct mgmt_rp_get_connections *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) struct hci_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) MGMT_STATUS_NOT_POWERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) list_for_each_entry(c, &hdev->conn_hash.list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (!rp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) list_for_each_entry(c, &hdev->conn_hash.list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) bacpy(&rp->addr[i].bdaddr, &c->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (c->type == SCO_LINK || c->type == ESCO_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) rp->conn_count = cpu_to_le16(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) /* Recalculate length in case of filtered SCO connections, etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) struct_size(rp, addr, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) kfree(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
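/* Queue a pending PIN Code Negative Reply command and send the
 * corresponding HCI command to the controller.
 */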
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct mgmt_cp_pin_code_neg_reply *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) cmd->cmd_complete = addr_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct mgmt_cp_pin_code_reply *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) struct hci_cp_pin_code_reply reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) MGMT_STATUS_NOT_POWERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (!conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) MGMT_STATUS_NOT_CONNECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) struct mgmt_cp_pin_code_neg_reply ncp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) bt_dev_err(hdev, "PIN code is not 16 bytes long");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) err = send_pin_code_neg_reply(sk, hdev, &ncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if (err >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) cmd->cmd_complete = addr_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) bacpy(&reply.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) reply.pin_len = cp->pin_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) struct mgmt_cp_set_io_capability *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) hdev->io_capability = cp->io_capability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
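/* Find the pending Pair Device command, if any, that is associated
 * with the given connection.
 */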
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) struct hci_dev *hdev = conn->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (cmd->user_data != conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) return cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
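/* Complete a pending Pair Device command with the given status, detach
 * the pairing callbacks from the connection and drop the references
 * taken when the pairing was started.
 */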
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) struct mgmt_rp_pair_device rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) struct hci_conn *conn = cmd->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) bacpy(&rp.addr.bdaddr, &conn->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) status, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) /* So we don't get further callbacks for this connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) conn->connect_cfm_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) conn->security_cfm_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) conn->disconn_cfm_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) hci_conn_drop(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) /* The device is paired so there is no need to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) * its connection parameters anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) hci_conn_put(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
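/* Complete any pending Pair Device command for the given connection
 * once SMP pairing has finished, mapping the result to a management
 * status.
 */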
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) void mgmt_smp_complete(struct hci_conn *conn, bool complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) cmd = find_pairing(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) cmd->cmd_complete(cmd, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) static void pairing_complete_cb(struct hci_conn *conn, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) BT_DBG("status %u", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) cmd = find_pairing(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) BT_DBG("Unable to find a pending command");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) BT_DBG("status %u", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
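	/* A zero status only means that the connect or security request
	 * succeeded; for LE the pairing result itself is reported via
	 * mgmt_smp_complete(), so only failures are handled here.
	 */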
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) cmd = find_pairing(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) BT_DBG("Unable to find a pending command");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) struct mgmt_cp_pair_device *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct mgmt_rp_pair_device rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) u8 sec_level, auth_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) rp.addr.type = cp->addr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (!bdaddr_type_is_valid(cp->addr.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) MGMT_STATUS_NOT_POWERED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) MGMT_STATUS_ALREADY_PAIRED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) sec_level = BT_SECURITY_MEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) auth_type = HCI_AT_DEDICATED_BONDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) auth_type, CONN_REASON_PAIR_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) u8 addr_type = le_addr_type(cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) struct hci_conn_params *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) /* When pairing a new device, it is expected to remember
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * this device for future connections. Adding the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) * parameter information ahead of time allows tracking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) * of the slave preferred values and will speed up any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) * further connection establishment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) * If connection parameters already exist, then they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) * will be kept and this function does nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) p->auto_connect = HCI_AUTO_CONN_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) sec_level, HCI_LE_CONN_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) CONN_REASON_PAIR_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (IS_ERR(conn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) if (PTR_ERR(conn) == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) status = MGMT_STATUS_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) else if (PTR_ERR(conn) == -EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) status = MGMT_STATUS_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) else if (PTR_ERR(conn) == -ECONNREFUSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) status = MGMT_STATUS_REJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) status = MGMT_STATUS_CONNECT_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) status, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) if (conn->connect_cfm_cb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) hci_conn_drop(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) MGMT_STATUS_BUSY, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) hci_conn_drop(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) cmd->cmd_complete = pairing_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* For LE, just connecting isn't a proof that the pairing finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) conn->connect_cfm_cb = pairing_complete_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) conn->security_cfm_cb = pairing_complete_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) conn->disconn_cfm_cb = pairing_complete_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) conn->connect_cfm_cb = le_pairing_complete_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) conn->security_cfm_cb = le_pairing_complete_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) conn->disconn_cfm_cb = le_pairing_complete_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) conn->io_capability = cp->io_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) cmd->user_data = hci_conn_get(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) hci_conn_security(conn, sec_level, auth_type, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) cmd->cmd_complete(cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) struct mgmt_addr_info *addr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) MGMT_STATUS_NOT_POWERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) conn = cmd->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) addr, sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) /* Since user doesn't want to proceed with the connection, abort any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) * ongoing pairing and then terminate the link if it was created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) * because of the pair device action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (addr->type == BDADDR_BREDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) hci_remove_link_key(hdev, &addr->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) le_addr_type(addr->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
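/* Common helper for the user confirmation, user passkey and PIN code
 * reply commands. For LE the reply is handed to SMP directly, while
 * for BR/EDR a pending command is queued and the corresponding HCI
 * command is sent to the controller.
 */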
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) struct mgmt_addr_info *addr, u16 mgmt_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) u16 hci_op, __le32 passkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) MGMT_STATUS_NOT_POWERED, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (addr->type == BDADDR_BREDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) le_addr_type(addr->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (!conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) MGMT_STATUS_NOT_CONNECTED, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) err = smp_user_confirm_reply(conn, mgmt_op, passkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) MGMT_STATUS_SUCCESS, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) MGMT_STATUS_FAILED, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) cmd->cmd_complete = addr_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) /* Continue with pairing via HCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct hci_cp_user_passkey_reply cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) bacpy(&cp.bdaddr, &addr->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) cp.passkey = passkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) &addr->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) struct mgmt_cp_pin_code_neg_reply *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) return user_pairing_resp(sk, hdev, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) MGMT_OP_PIN_CODE_NEG_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) HCI_OP_PIN_CODE_NEG_REPLY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) struct mgmt_cp_user_confirm_reply *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) if (len != sizeof(*cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) return user_pairing_resp(sk, hdev, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) MGMT_OP_USER_CONFIRM_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) HCI_OP_USER_CONFIRM_REPLY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) struct mgmt_cp_user_confirm_neg_reply *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) return user_pairing_resp(sk, hdev, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) MGMT_OP_USER_CONFIRM_NEG_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) struct mgmt_cp_user_passkey_reply *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) return user_pairing_resp(sk, hdev, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) MGMT_OP_USER_PASSKEY_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) struct mgmt_cp_user_passkey_neg_reply *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) return user_pairing_resp(sk, hdev, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) MGMT_OP_USER_PASSKEY_NEG_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
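/* Expire the current advertising instance if it uses data (indicated
 * by flags) that has just changed, and schedule the next instance in
 * its place.
 */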
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) static void adv_expire(struct hci_dev *hdev, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) /* stop if current instance doesn't need to be changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) if (!(adv_instance->flags & flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) cancel_adv_timeout(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
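/* HCI request completion handler for MGMT_OP_SET_LOCAL_NAME: report the
 * result to the pending mgmt command and, on success, expire advertising
 * instances that include the local name.
 */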
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct mgmt_cp_set_local_name *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) cp, sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) if (hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
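/* Handler for MGMT_OP_SET_LOCAL_NAME: update the local and short device
 * name and, if the controller is powered, push the new name into the EIR
 * data and the LE scan response.
 */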
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) struct mgmt_cp_set_local_name *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) /* If the old values are the same as the new ones just return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) * direct command complete event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) !memcmp(hdev->short_name, cp->short_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) sizeof(hdev->short_name))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) ext_info_changed(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (lmp_bredr_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) __hci_req_update_name(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) /* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) err = hci_req_run(&req, set_name_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
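/* Handler for MGMT_OP_SET_APPEARANCE: store the new appearance value and
 * expire any advertising instance that includes the appearance field.
 */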
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) struct mgmt_cp_set_appearance *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) u16 appearance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) appearance = le16_to_cpu(cp->appearance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) if (hdev->appearance != appearance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) hdev->appearance = appearance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) if (hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) ext_info_changed(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
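/* Handler for MGMT_OP_GET_PHY_CONFIGURATION: report the supported,
 * configurable and currently selected PHYs of the controller.
 */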
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) struct mgmt_rp_get_phy_confguration rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) struct mgmt_ev_phy_configuration_changed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) sizeof(ev), skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
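/* Completion handler for the HCI LE Set Default PHY request issued by
 * MGMT_OP_SET_PHY_CONFIGURATION.
 */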
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) u16 opcode, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) mgmt_cmd_status(cmd->sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) mgmt_cmd_complete(cmd->sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) MGMT_OP_SET_PHY_CONFIGURATION, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) mgmt_phy_configuration_changed(hdev, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
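/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: translate the selected PHYs
 * into the BR/EDR packet types stored in hdev->pkt_type and, for the LE
 * PHYs, into an HCI LE Set Default PHY command.
 */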
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) struct mgmt_cp_set_phy_confguration *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) struct hci_cp_le_set_default_phy cp_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) u16 pkt_type = (HCI_DH1 | HCI_DM1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) configurable_phys = get_configurable_phys(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) supported_phys = get_supported_phys(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) selected_phys = __le32_to_cpu(cp->selected_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) if (selected_phys & ~supported_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) unconfigure_phys = supported_phys & ~configurable_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) if ((selected_phys & unconfigure_phys) != unconfigure_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if (selected_phys == get_selected_phys(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) return mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) pkt_type |= (HCI_DH3 | HCI_DM3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) pkt_type &= ~(HCI_DH3 | HCI_DM3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) pkt_type |= (HCI_DH5 | HCI_DM5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) pkt_type &= ~(HCI_DH5 | HCI_DM5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) pkt_type &= ~HCI_2DH1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) pkt_type |= HCI_2DH1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) pkt_type &= ~HCI_2DH3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) pkt_type |= HCI_2DH3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) pkt_type &= ~HCI_2DH5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) pkt_type |= HCI_2DH5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) pkt_type &= ~HCI_3DH1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) pkt_type |= HCI_3DH1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) pkt_type &= ~HCI_3DH3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) pkt_type |= HCI_3DH3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) pkt_type &= ~HCI_3DH5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) pkt_type |= HCI_3DH5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (pkt_type != hdev->pkt_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) hdev->pkt_type = pkt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) changed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if ((selected_phys & MGMT_PHY_LE_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) mgmt_phy_configuration_changed(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) MGMT_OP_SET_PHY_CONFIGURATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) memset(&cp_phy, 0, sizeof(cp_phy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) cp_phy.all_phys |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) cp_phy.all_phys |= 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (selected_phys & MGMT_PHY_LE_1M_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (selected_phys & MGMT_PHY_LE_2M_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (selected_phys & MGMT_PHY_LE_CODED_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) if (selected_phys & MGMT_PHY_LE_1M_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) if (selected_phys & MGMT_PHY_LE_2M_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) if (selected_phys & MGMT_PHY_LE_CODED_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) err = hci_req_run_skb(&req, set_default_phy_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
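/* Handler for MGMT_OP_SET_BLOCKED_KEYS: replace hdev->blocked_keys with
 * the list of keys provided by userspace.
 */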
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) int err = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) struct mgmt_cp_set_blocked_keys *keys = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) sizeof(struct mgmt_blocked_key_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) u16 key_count, expected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) key_count = __le16_to_cpu(keys->key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) if (key_count > max_key_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) bt_dev_err(hdev, "too big key_count value %u", key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) expected_len = struct_size(keys, keys, key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) if (expected_len != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) bt_dev_err(hdev, "expected %u bytes, got %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) expected_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) hci_blocked_keys_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
	for (i = 0; i < key_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) if (!b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) err = MGMT_STATUS_NO_RESOURCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) b->type = keys->keys[i].type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) memcpy(b->val, keys->keys[i].val, sizeof(b->val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) list_add_rcu(&b->list, &hdev->blocked_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) err, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
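/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED setting; a change of the value is rejected
 * while the controller is powered.
 */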
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) MGMT_OP_SET_WIDEBAND_SPEECH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) MGMT_OP_SET_WIDEBAND_SPEECH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) MGMT_OP_SET_WIDEBAND_SPEECH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) if (hdev_is_powered(hdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) !!cp->val != hci_dev_test_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) HCI_WIDEBAND_SPEECH_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) MGMT_OP_SET_WIDEBAND_SPEECH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) if (cp->val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) changed = !hci_dev_test_and_set_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) HCI_WIDEBAND_SPEECH_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) HCI_WIDEBAND_SPEECH_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
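/* Handler for MGMT_OP_READ_SECURITY_INFO: report security related
 * capabilities as EIR-encoded fields (public key validation and
 * encryption key size enforcement and limits).
 */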
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) static int read_security_info(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) char buf[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) struct mgmt_rp_read_security_info *rp = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) u16 sec_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) u8 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) memset(&buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)
	/* When the Read Simple Pairing Options command is supported,
	 * remote public key validation is also supported.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (hdev->commands[41] & 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) flags |= 0x01; /* Remote public key validation (BR/EDR) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) flags |= 0x02; /* Remote public key validation (LE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) /* When the Read Encryption Key Size command is supported, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) * encryption key size is enforced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) if (hdev->commands[20] & 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) flags |= 0x08; /* Encryption key size enforcement (LE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
	/* When the Read Simple Pairing Options command is supported, the
	 * maximum encryption key size information is also provided.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) if (hdev->commands[41] & 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) hdev->max_enc_key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) rp->sec_len = cpu_to_le16(sec_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) rp, sizeof(*rp) + sec_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) #ifdef CONFIG_BT_FEATURE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) /* d4992530-b9ec-469f-ab01-6c481c47da1c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) static const u8 debug_uuid[16] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) static const u8 simult_central_periph_uuid[16] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* 15c0a148-c273-11ea-b3de-0242ac130004 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) static const u8 rpa_resolution_uuid[16] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
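/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features (debug, simultaneous central/peripheral, LL privacy) together
 * with their current flags.
 */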
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) char buf[62]; /* Enough space for 3 features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) u16 idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) memset(&buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) #ifdef CONFIG_BT_FEATURE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) flags = bt_dbg_get() ? BIT(0) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) memcpy(rp->features[idx].uuid, debug_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) rp->features[idx].flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) if (hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) (hdev->le_states[4] & 0x08) && /* Central */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) (hdev->le_states[4] & 0x40) && /* Peripheral */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) (hdev->le_states[3] & 0x10)) /* Simultaneous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) flags = BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) rp->features[idx].flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) if (hdev && use_ll_privacy(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) flags = BIT(0) | BIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) flags = BIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) rp->features[idx].flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) rp->feature_count = cpu_to_le16(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) /* After reading the experimental features information, enable
	 * the events to update the client on any future change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) MGMT_OP_READ_EXP_FEATURES_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 0, rp, sizeof(*rp) + (20 * idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) struct sock *skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) struct mgmt_ev_exp_feature_changed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) memcpy(ev.uuid, rpa_resolution_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) HCI_MGMT_EXP_FEATURE_EVENTS, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) #ifdef CONFIG_BT_FEATURE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) static int exp_debug_feature_changed(bool enabled, struct sock *skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) struct mgmt_ev_exp_feature_changed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) memcpy(ev.uuid, debug_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) HCI_MGMT_EXP_FEATURE_EVENTS, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
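/* Handler for MGMT_OP_SET_EXP_FEATURE: enable or disable a single
 * experimental feature identified by UUID, or disable all of them when
 * the zero UUID is given.
 */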
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) struct mgmt_cp_set_exp_feature *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) struct mgmt_rp_set_exp_feature rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) memset(rp.uuid, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) rp.flags = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) #ifdef CONFIG_BT_FEATURE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) bool changed = bt_dbg_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) bt_dbg_set(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) exp_debug_feature_changed(false, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) bool changed = hci_dev_test_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) HCI_ENABLE_LL_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) exp_ll_privacy_feature_changed(false, hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) MGMT_OP_SET_EXP_FEATURE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) #ifdef CONFIG_BT_FEATURE_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) if (!memcmp(cp->uuid, debug_uuid, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) bool val, changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
		/* Command requires the use of the non-controller index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) if (hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) /* Parameters are limited to a single octet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) /* Only boolean on/off is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) val = !!cp->param[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) changed = val ? !bt_dbg_get() : bt_dbg_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) bt_dbg_set(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) memcpy(rp.uuid, debug_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) rp.flags = cpu_to_le32(val ? BIT(0) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) MGMT_OP_SET_EXP_FEATURE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) exp_debug_feature_changed(val, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) bool val, changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)
		/* Command requires the use of a controller index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (!hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
		/* Changes can only be made when the controller is powered down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if (hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) MGMT_STATUS_NOT_POWERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) /* Parameters are limited to a single octet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) /* Only boolean on/off is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) val = !!cp->param[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) changed = !hci_dev_test_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) HCI_ENABLE_LL_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) hci_dev_clear_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) /* Enable LL privacy + supported settings changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) flags = BIT(0) | BIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) changed = hci_dev_test_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) HCI_ENABLE_LL_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) /* Disable LL privacy + supported settings changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) flags = BIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) memcpy(rp.uuid, rpa_resolution_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) rp.flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) MGMT_OP_SET_EXP_FEATURE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) exp_ll_privacy_feature_changed(val, hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) MGMT_OP_SET_EXP_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024)
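/* Handler for MGMT_OP_GET_DEVICE_FLAGS: look up the device in the
 * whitelist (BR/EDR) or the LE connection parameter list and report its
 * supported and current flags.
 */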
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) struct mgmt_cp_get_device_flags *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) struct mgmt_rp_get_device_flags rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) struct bdaddr_list_with_flags *br_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) struct hci_conn_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) u32 current_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) u8 status = MGMT_STATUS_INVALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) &cp->addr.bdaddr, cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) if (!br_params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) current_flags = br_params->current_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) le_addr_type(cp->addr.type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) if (!params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) current_flags = params->current_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) rp.addr.type = cp->addr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) rp.supported_flags = cpu_to_le32(supported_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) rp.current_flags = cpu_to_le32(current_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
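/* Notify the other management sockets that the flags of a device changed.
 * The originating socket is passed as the skip socket, so it only sees the
 * command response and not its own event.
 */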
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) bdaddr_t *bdaddr, u8 bdaddr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) u32 supported_flags, u32 current_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) struct mgmt_ev_device_flags_changed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) ev.addr.type = bdaddr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) ev.supported_flags = cpu_to_le32(supported_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) ev.current_flags = cpu_to_le32(current_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
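/* Update the flags of a device on the whitelist (BR/EDR) or with stored
 * connection parameters (LE). Flag bits outside SUPPORTED_DEVICE_FLAGS()
 * are rejected up front.
 */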
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) struct mgmt_cp_set_device_flags *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) struct bdaddr_list_with_flags *br_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) struct hci_conn_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) u8 status = MGMT_STATUS_INVALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) u32 current_flags = __le32_to_cpu(cp->current_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098)
bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
&cp->addr.bdaddr, cp->addr.type, current_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) if ((supported_flags | current_flags) != supported_flags) {
bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) current_flags, supported_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (br_params) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) br_params->current_flags = current_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) &cp->addr.bdaddr, cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) le_addr_type(cp->addr.type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (params) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) params->current_flags = current_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) le_addr_type(cp->addr.type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) if (status == MGMT_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) supported_flags, current_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146)
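/* Emit MGMT_EV_ADV_MONITOR_ADDED to the other management sockets once a new
 * advertisement monitor handle has been allocated.
 */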
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) u16 handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) struct mgmt_ev_adv_monitor_added ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) ev.monitor_handle = cpu_to_le16(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) u16 handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) {
struct mgmt_ev_adv_monitor_removed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) ev.monitor_handle = cpu_to_le16(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166)
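/* Report the advertisement monitor capabilities (currently only the MSFT
 * OR-patterns feature, when the controller exposes it) together with the
 * handles of the monitors that are already registered.
 */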
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) struct adv_monitor *monitor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) struct mgmt_rp_read_adv_monitor_features *rp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) int handle, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) size_t rp_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) __u32 supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) __u16 num_handles = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) BT_DBG("request for %s", hdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) handles[num_handles++] = monitor->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) rp = kmalloc(rp_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) if (!rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) /* Once controller-based monitoring is in place, the enabled_features
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) * should reflect the use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) rp->supported_features = cpu_to_le32(supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) rp->enabled_features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) rp->num_handles = cpu_to_le16(num_handles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) if (num_handles)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) MGMT_OP_READ_ADV_MONITOR_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) MGMT_STATUS_SUCCESS, rp, rp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) kfree(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215)
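/* Register a new advertisement monitor. Each pattern is validated against
 * HCI_MAX_AD_LENGTH before being copied, and the assembled monitor is handed
 * to hci_add_adv_monitor(), which assigns the handle returned in the reply.
 */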
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) struct mgmt_cp_add_adv_patterns_monitor *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) struct mgmt_rp_add_adv_patterns_monitor rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) struct adv_monitor *m = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) struct adv_pattern *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) __u8 cp_ofst = 0, cp_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) BT_DBG("request for %s", hdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
/* Make sure the request actually carries pattern_count patterns,
 * otherwise the copy loop below would read past the request buffer.
 */
if (len <= sizeof(*cp) || cp->pattern_count == 0 ||
    len != sizeof(*cp) + cp->pattern_count * sizeof(cp->patterns[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) m = kmalloc(sizeof(*m), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) if (!m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) INIT_LIST_HEAD(&m->patterns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) m->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) for (i = 0; i < cp->pattern_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) cp_ofst = cp->patterns[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) cp_len = cp->patterns[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) if (cp_ofst >= HCI_MAX_AD_LENGTH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) cp_len > HCI_MAX_AD_LENGTH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) p = kmalloc(sizeof(*p), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) p->ad_type = cp->patterns[i].ad_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) p->offset = cp->patterns[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) p->length = cp->patterns[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) memcpy(p->value, cp->patterns[i].value, p->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) INIT_LIST_HEAD(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) list_add(&p->list, &m->patterns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) if (mp_cnt != cp->pattern_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) err = hci_add_adv_monitor(hdev, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) if (err == -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) MGMT_STATUS_NO_RESOURCES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) mgmt_adv_monitor_added(sk, hdev, m->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) rp.monitor_handle = cpu_to_le16(m->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) hci_free_adv_monitor(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
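/* Remove a previously registered advertisement monitor by handle. A drop in
 * hdev->adv_monitors_cnt is what triggers the MGMT_EV_ADV_MONITOR_REMOVED
 * event to the other sockets.
 */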
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) struct mgmt_cp_remove_adv_monitor *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) struct mgmt_rp_remove_adv_monitor rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) unsigned int prev_adv_monitors_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) u16 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) BT_DBG("request for %s", hdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) handle = __le16_to_cpu(cp->monitor_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) err = hci_remove_adv_monitor(hdev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) MGMT_STATUS_INVALID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) mgmt_adv_monitor_removed(sk, hdev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) rp.monitor_handle = cp->monitor_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355)
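/* Completion handler for the local OOB data request. For the legacy HCI
 * command only the P-192 hash/randomizer are returned and the reply is
 * shrunk accordingly; the extended command also carries the P-256 values.
 */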
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) u16 opcode, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) struct mgmt_rp_read_local_oob_data mgmt_rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) size_t rp_size = sizeof(mgmt_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) bt_dev_dbg(hdev, "status %u", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) if (status || !skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) status ? mgmt_status(status) : MGMT_STATUS_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) goto remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) memset(&mgmt_rp, 0, sizeof(mgmt_rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) if (skb->len < sizeof(*rp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) mgmt_cmd_status(cmd->sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) MGMT_STATUS_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) goto remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) if (skb->len < sizeof(*rp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) mgmt_cmd_status(cmd->sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) MGMT_STATUS_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) goto remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414)
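/* Read the local OOB pairing data from the controller. The extended variant
 * of the HCI command is used when BR/EDR Secure Connections is enabled, so
 * that P-256 values are returned as well.
 */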
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) MGMT_STATUS_NOT_POWERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) if (!lmp_ssp_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) if (bredr_sc_enabled(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) err = hci_req_run_skb(&req, read_local_oob_data_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465)
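/* Store out-of-band pairing data received for a remote device. The command
 * comes in two sizes: the legacy form with only the P-192 hash/randomizer,
 * and the extended form that additionally carries the P-256 values.
 */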
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) struct mgmt_addr_info *addr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) if (!bdaddr_type_is_valid(addr->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) return mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) MGMT_OP_ADD_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) addr, sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) struct mgmt_cp_add_remote_oob_data *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) if (cp->addr.type != BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) MGMT_OP_ADD_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) cp->addr.type, cp->hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) cp->rand, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) MGMT_OP_ADD_REMOTE_OOB_DATA, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) struct mgmt_cp_add_remote_oob_ext_data *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) u8 *rand192, *hash192, *rand256, *hash256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) if (bdaddr_type_is_le(cp->addr.type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) /* Enforce zero-valued 192-bit parameters as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) * long as legacy SMP OOB isn't implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) if (memcmp(cp->rand192, ZERO_KEY, 16) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) memcmp(cp->hash192, ZERO_KEY, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) MGMT_OP_ADD_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) addr, sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) rand192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) hash192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) /* In case one of the P-192 values is set to zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) * then just disable OOB data for P-192.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) !memcmp(cp->hash192, ZERO_KEY, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) rand192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) hash192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) rand192 = cp->rand192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) hash192 = cp->hash192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) /* In case one of the P-256 values is set to zero, then just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) * disable OOB data for P-256.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) !memcmp(cp->hash256, ZERO_KEY, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) rand256 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) hash256 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) rand256 = cp->rand256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) hash256 = cp->hash256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) cp->addr.type, hash192, rand192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) hash256, rand256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) MGMT_OP_ADD_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) status, &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573)
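/* Remove stored OOB data for a single BR/EDR device, or for all devices when
 * BDADDR_ANY is given.
 */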
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) struct mgmt_cp_remove_remote_oob_data *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) if (cp->addr.type != BDADDR_BREDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) return mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) MGMT_OP_REMOVE_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) hci_remote_oob_data_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) status = MGMT_STATUS_INVALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) status, &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610)
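/* Invoked once starting discovery has completed: finish whichever Start
 * Discovery variant is pending and wake up a suspend/resume transition that
 * was waiting for discovery to be unpaused.
 */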
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) bt_dev_dbg(hdev, "status %d", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) /* Handle suspend notifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) hdev->suspend_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) bt_dev_dbg(hdev, "Unpaused discovery");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) wake_up(&hdev->suspend_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640)
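/* Map a requested discovery type to the transports it needs: LE-only needs
 * LE support, BR/EDR-only needs BR/EDR support, and interleaved discovery
 * needs both. On failure *mgmt_status is set to the status to return.
 */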
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) uint8_t *mgmt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) case DISCOV_TYPE_LE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) *mgmt_status = mgmt_le_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) if (*mgmt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) case DISCOV_TYPE_INTERLEAVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) *mgmt_status = mgmt_le_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) if (*mgmt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) case DISCOV_TYPE_BREDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) *mgmt_status = mgmt_bredr_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) if (*mgmt_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
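/* Common implementation for Start Discovery and Start Limited Discovery.
 * The request is rejected while powered off, while another discovery or a
 * periodic inquiry is running, or while discovery is paused; otherwise the
 * actual scanning is kicked off from the req_workqueue.
 */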
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) u16 op, void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) struct mgmt_cp_start_discovery *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) err = mgmt_cmd_complete(sk, hdev->id, op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) MGMT_STATUS_NOT_POWERED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) if (hdev->discovery.state != DISCOVERY_STOPPED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) if (!discovery_type_is_valid(hdev, cp->type, &status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) err = mgmt_cmd_complete(sk, hdev->id, op, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) /* Can't start discovery when it is paused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) if (hdev->discovery_paused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) /* Clear the discovery filter first to free any previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) * allocated memory for the UUID list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) hci_discovery_filter_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) hdev->discovery.type = cp->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) hdev->discovery.report_invalid_rssi = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) if (op == MGMT_OP_START_LIMITED_DISCOVERY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) hdev->discovery.limited = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) hdev->discovery.limited = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) cmd = mgmt_pending_add(sk, op, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) cmd->cmd_complete = generic_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) hci_discovery_set_state(hdev, DISCOVERY_STARTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) queue_work(hdev->req_workqueue, &hdev->discov_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) static int start_discovery(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) return start_discovery_internal(sk, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) MGMT_OP_START_LIMITED_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
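/* Only the discovery type (the first parameter byte) is echoed back in the
 * Start Service Discovery response.
 */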
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) cmd->param, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
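/* Start Service Discovery is Start Discovery plus result filtering. The
 * command carries the type, an RSSI threshold and a variable-length list of
 * uuid_count 16-byte UUIDs, which is copied into hdev->discovery and used to
 * filter results as they come in.
 */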
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) struct mgmt_cp_start_service_discovery *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) u16 uuid_count, expected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) MGMT_STATUS_NOT_POWERED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) if (hdev->discovery.state != DISCOVERY_STOPPED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) MGMT_STATUS_BUSY, &cp->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) uuid_count = __le16_to_cpu(cp->uuid_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) if (uuid_count > max_uuid_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) uuid_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) MGMT_STATUS_INVALID_PARAMS, &cp->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) expected_len = sizeof(*cp) + uuid_count * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) if (expected_len != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) expected_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) MGMT_STATUS_INVALID_PARAMS, &cp->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) if (!discovery_type_is_valid(hdev, cp->type, &status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) status, &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) cmd->cmd_complete = service_discovery_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) /* Clear the discovery filter first to free any previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) * allocated memory for the UUID list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) hci_discovery_filter_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) hdev->discovery.result_filtering = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) hdev->discovery.type = cp->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) hdev->discovery.rssi = cp->rssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) hdev->discovery.uuid_count = uuid_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) if (uuid_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) if (!hdev->discovery.uuids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) MGMT_OP_START_SERVICE_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) MGMT_STATUS_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) &cp->type, sizeof(cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) hci_discovery_set_state(hdev, DISCOVERY_STARTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) queue_work(hdev->req_workqueue, &hdev->discov_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) bt_dev_dbg(hdev, "status %d", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) /* Handle suspend notifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) bt_dev_dbg(hdev, "Paused discovery");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) wake_up(&hdev->suspend_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881)
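/* Handler for MGMT_OP_STOP_DISCOVERY: verifies that discovery is active and
 * that the requested type matches the one in progress, then queues the
 * discovery update work to stop it asynchronously.
 */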
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) struct mgmt_cp_stop_discovery *mgmt_cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) if (!hci_discovery_active(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) MGMT_STATUS_REJECTED, &mgmt_cp->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) sizeof(mgmt_cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) if (hdev->discovery.type != mgmt_cp->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) &mgmt_cp->type, sizeof(mgmt_cp->type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) cmd->cmd_complete = generic_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) queue_work(hdev->req_workqueue, &hdev->discov_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923)
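/* Handler for MGMT_OP_CONFIRM_NAME: updates the inquiry cache entry for the
 * given address to record whether its name is already known or still needs
 * to be resolved during discovery.
 */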
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) struct mgmt_cp_confirm_name *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) struct inquiry_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) if (!hci_discovery_active(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) MGMT_STATUS_FAILED, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) MGMT_STATUS_INVALID_PARAMS, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) if (cp->name_known) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) e->name_state = NAME_KNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) list_del(&e->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) e->name_state = NAME_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) hci_inquiry_cache_update_resolve(hdev, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965)
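/* Handler for MGMT_OP_BLOCK_DEVICE: adds the given address to hdev->blacklist
 * and emits MGMT_EV_DEVICE_BLOCKED to other management sockets.
 */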
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) struct mgmt_cp_block_device *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) if (!bdaddr_type_is_valid(cp->addr.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) struct mgmt_cp_unblock_device *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) if (!bdaddr_type_is_valid(cp->addr.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) status = MGMT_STATUS_INVALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037)
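/* Handler for MGMT_OP_SET_DEVICE_ID: stores the Device ID source, vendor,
 * product and version values and triggers an EIR update so the controller
 * data reflects them.
 */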
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) struct mgmt_cp_set_device_id *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) __u16 source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) source = __le16_to_cpu(cp->source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) if (source > 0x0002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) hdev->devid_source = source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) hdev->devid_vendor = __le16_to_cpu(cp->vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) hdev->devid_product = __le16_to_cpu(cp->product);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) hdev->devid_version = __le16_to_cpu(cp->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) bt_dev_dbg(hdev, "status %d", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078)
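/* Completion callback for the Set Advertising HCI request: syncs the
 * HCI_ADVERTISING flag with the controller state, responds to pending
 * commands, handles the suspend notifier and, if advertising was just
 * disabled while advertising instances exist, re-schedules instance
 * advertising.
 */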
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) static void set_advertising_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) struct cmd_lookup match = { NULL, hdev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) u8 instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) cmd_status_rsp, &mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) if (hci_dev_test_flag(hdev, HCI_LE_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) hci_dev_set_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) hci_dev_clear_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) new_settings(hdev, match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) /* Handle suspend notifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) hdev->suspend_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) bt_dev_dbg(hdev, "Paused advertising");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) wake_up(&hdev->suspend_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) hdev->suspend_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) bt_dev_dbg(hdev, "Unpaused advertising");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) wake_up(&hdev->suspend_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) /* If "Set Advertising" was just disabled and instance advertising was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) * set up earlier, then re-enable multi-instance advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) list_empty(&hdev->adv_instances))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) instance = hdev->cur_adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) if (!instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) adv_instance = list_first_entry_or_null(&hdev->adv_instances,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) struct adv_info, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) if (!adv_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) instance = adv_instance->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) err = __hci_req_schedule_adv_instance(&req, instance, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) err = hci_req_run(&req, enable_advertising_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) bt_dev_err(hdev, "failed to re-configure advertising");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152)
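/* Handler for MGMT_OP_SET_ADVERTISING: 0x00 disables advertising, 0x01
 * enables it and 0x02 enables connectable advertising. When no HCI
 * communication is needed the flags are toggled directly; otherwise an HCI
 * request is built and its result is handled in set_advertising_complete().
 */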
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) u8 val, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) status = mgmt_le_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168)
	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) if (hdev->advertising_paused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) val = !!cp->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) /* The following conditions are ones which mean that we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) * not do any HCI communication but directly send a mgmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) * response to user space (after toggling the flag if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) * necessary).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) if (!hdev_is_powered(hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) hci_conn_num(hdev, LE_LINK) > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) hdev->le_scan_type == LE_SCAN_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) if (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) hdev->cur_adv_instance = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) if (cp->val == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) pending_find(MGMT_OP_SET_LE, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) if (cp->val == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) cancel_adv_timeout(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) /* Switch to instance "0" for the Set Advertising setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) * We cannot use update_[adv|scan_rsp]_data() here as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) * HCI_ADVERTISING flag is not yet set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) hdev->cur_adv_instance = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) __hci_req_start_ext_adv(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) __hci_req_update_adv_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) __hci_req_update_scan_rsp_data(&req, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) __hci_req_enable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) __hci_req_disable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) err = hci_req_run(&req, set_advertising_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271)
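/* Handler for MGMT_OP_SET_STATIC_ADDRESS: validates and stores the random
 * static address to be used for LE. Only allowed while the controller is
 * powered off.
 */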
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) static int set_static_address(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) struct mgmt_cp_set_static_address *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) if (hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) if (!bacmp(&cp->bdaddr, BDADDR_NONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) MGMT_OP_SET_STATIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) /* Two most significant bits shall be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) MGMT_OP_SET_STATIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) bacpy(&hdev->static_addr, &cp->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315)
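/* Handler for MGMT_OP_SET_SCAN_PARAMS: validates the LE scan interval and
 * window (0x0004-0x4000, window <= interval), stores them and restarts the
 * background scan if one is running so the new values take effect.
 */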
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) struct mgmt_cp_set_scan_params *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) __u16 interval, window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) interval = __le16_to_cpu(cp->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) if (interval < 0x0004 || interval > 0x4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) window = __le16_to_cpu(cp->window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) if (window < 0x0004 || window > 0x4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) if (window > interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) hdev->le_scan_interval = interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) hdev->le_scan_window = window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) /* If background scan is running, restart it so new parameters are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) * loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) hdev->discovery.state == DISCOVERY_STOPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) hci_req_add_le_scan_disable(&req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) hci_req_add_le_passive_scan(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) struct mgmt_mode *cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) if (cp->val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) new_settings(hdev, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406)
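/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggles use of the fast
 * connectable page scan parameters. When the controller is powered, the
 * change is written to it and confirmed in fast_connectable_complete().
 */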
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) hdev->hci_ver < BLUETOOTH_VER_1_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) __hci_req_write_fast_connectable(&req, cp->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) err = hci_req_run(&req, fast_connectable_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) MGMT_STATUS_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) bt_dev_dbg(hdev, "status 0x%02x", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486)
		/* The flag was set before issuing the HCI commands, so
		 * restore its previous state if they failed.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) new_settings(hdev, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503)
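/* Handler for MGMT_OP_SET_BREDR: enables or disables BR/EDR support on a
 * dual-mode controller. Disabling while powered is rejected, and re-enabling
 * is rejected when a static address or Secure Connections is in use.
 */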
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) if (cp->val != 0x00 && cp->val != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) if (!cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) /* Reject disabling when powered on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) if (!cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) } else {
		/* When a dual-mode controller is configured to operate
		 * with LE only and a static address, switching BR/EDR
		 * back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as their identity address for both BR/EDR and
		 * LE, so reject the attempt to create an invalid
		 * configuration.
		 *
		 * The same restriction applies when Secure Connections
		 * has been enabled. For BR/EDR this is a controller
		 * feature while for LE it is a host stack feature, which
		 * means that switching BR/EDR back on while Secure
		 * Connections is enabled is not a supported transaction.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) (bacmp(&hdev->static_addr, BDADDR_ANY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) /* We need to flip the bit already here so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) * hci_req_update_adv_data generates the correct flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) __hci_req_write_fast_connectable(&req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) __hci_req_update_scan(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) /* Since only the advertising data flags will change, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) * is no need to update the scan response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) err = hci_req_run(&req, set_bredr_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) struct mgmt_mode *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) bt_dev_dbg(hdev, "status %u", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) goto remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) switch (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) case 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) hci_dev_clear_flag(hdev, HCI_SC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) case 0x01:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) hci_dev_set_flag(hdev, HCI_SC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) hci_dev_clear_flag(hdev, HCI_SC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) case 0x02:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) hci_dev_set_flag(hdev, HCI_SC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) hci_dev_set_flag(hdev, HCI_SC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) new_settings(hdev, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660)
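/* Handler for MGMT_OP_SET_SECURE_CONN: 0x00 disables Secure Connections,
 * 0x01 enables it and 0x02 enables SC Only mode. When the flags can only be
 * toggled locally (e.g. powered off) they are updated directly; otherwise
 * the Write Secure Connections Host Support command is issued and the
 * result is handled in sc_enable_complete().
 */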
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) if (!lmp_sc_capable(hdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) lmp_sc_capable(hdev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) if (cp->val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) changed = !hci_dev_test_and_set_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) HCI_SC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) if (cp->val == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) hci_dev_set_flag(hdev, HCI_SC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) hci_dev_clear_flag(hdev, HCI_SC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) HCI_SC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) hci_dev_clear_flag(hdev, HCI_SC_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) val = !!cp->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) err = hci_req_run(&req, sc_enable_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748)
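/* Handler for MGMT_OP_SET_DEBUG_KEYS. Value 0x00 clears the
 * keep-debug-keys flag, 0x01 sets it and 0x02 additionally enables use
 * of debug keys. When that use changes on a powered controller with SSP
 * enabled, SSP debug mode is toggled via HCI_OP_WRITE_SSP_DEBUG_MODE.
 */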
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) struct mgmt_mode *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) bool changed, use_changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) if (cp->val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) HCI_KEEP_DEBUG_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) if (cp->val == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) use_changed = !hci_dev_test_and_set_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) HCI_USE_DEBUG_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) use_changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) HCI_USE_DEBUG_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) if (hdev_is_powered(hdev) && use_changed &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) sizeof(mode), &mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795)
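/* Handler for MGMT_OP_SET_PRIVACY. Only accepted while the controller
 * is powered off: 0x01 enables privacy with the supplied IRK, 0x02
 * selects limited privacy and 0x00 disables privacy and clears the
 * local IRK.
 */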
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) struct mgmt_cp_set_privacy *cp = cp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) if (hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) /* If user space supports this command it is also expected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) if (cp->privacy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) hci_adv_instances_set_rpa_expired(hdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) if (cp->privacy == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) memset(hdev->irk, 0, sizeof(hdev->irk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) hci_adv_instances_set_rpa_expired(hdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) err = new_settings(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852)
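/* An IRK entry is only valid for an LE public address or an LE static
 * random address (two most significant bits set).
 */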
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) static bool irk_is_valid(struct mgmt_irk_info *irk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) switch (irk->addr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) case BDADDR_LE_PUBLIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) case BDADDR_LE_RANDOM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) /* Two most significant bits shall be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868)
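/* Handler for MGMT_OP_LOAD_IRKS. Validates the supplied list, replaces
 * all stored IRKs with it (skipping blocked keys) and sets the
 * HCI_RPA_RESOLVING flag.
 */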
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) struct mgmt_cp_load_irks *cp = cp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) sizeof(struct mgmt_irk_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) u16 irk_count, expected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) irk_count = __le16_to_cpu(cp->irk_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) if (irk_count > max_irk_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) bt_dev_err(hdev, "load_irks: too big irk_count value %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) irk_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) expected_len = struct_size(cp, irks, irk_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) if (expected_len != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) expected_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) bt_dev_dbg(hdev, "irk_count %u", irk_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) for (i = 0; i < irk_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) struct mgmt_irk_info *key = &cp->irks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) if (!irk_is_valid(key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) MGMT_OP_LOAD_IRKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) hci_smp_irks_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) for (i = 0; i < irk_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) struct mgmt_irk_info *irk = &cp->irks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) if (hci_is_blocked_key(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) HCI_BLOCKED_KEY_TYPE_IRK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) irk->val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) &irk->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) hci_add_irk(hdev, &irk->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) le_addr_type(irk->addr.type), irk->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) BDADDR_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939)
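/* An LTK entry is only valid if the master field is 0x00 or 0x01 and
 * the address is an LE public or LE static random address.
 */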
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) static bool ltk_is_valid(struct mgmt_ltk_info *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) if (key->master != 0x00 && key->master != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) switch (key->addr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) case BDADDR_LE_PUBLIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) case BDADDR_LE_RANDOM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) /* Two most significant bits shall be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958)
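/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS. Validates the supplied list
 * and replaces all stored LTKs with it, skipping blocked keys as well
 * as entries with an unknown or debug key type.
 */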
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) void *cp_data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) struct mgmt_cp_load_long_term_keys *cp = cp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) sizeof(struct mgmt_ltk_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) u16 key_count, expected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) key_count = __le16_to_cpu(cp->key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) if (key_count > max_key_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) bt_dev_err(hdev, "load_ltks: too big key_count value %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) expected_len = struct_size(cp, keys, key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) if (expected_len != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) expected_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) bt_dev_dbg(hdev, "key_count %u", key_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) for (i = 0; i < key_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) struct mgmt_ltk_info *key = &cp->keys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) if (!ltk_is_valid(key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) return mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) MGMT_OP_LOAD_LONG_TERM_KEYS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) hci_smp_ltks_clear(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) for (i = 0; i < key_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) struct mgmt_ltk_info *key = &cp->keys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) u8 type, authenticated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) if (hci_is_blocked_key(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) HCI_BLOCKED_KEY_TYPE_LTK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) key->val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) &key->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) switch (key->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) case MGMT_LTK_UNAUTHENTICATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) authenticated = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) case MGMT_LTK_AUTHENTICATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) authenticated = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) case MGMT_LTK_P256_UNAUTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) authenticated = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) type = SMP_LTK_P256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) case MGMT_LTK_P256_AUTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) authenticated = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) type = SMP_LTK_P256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) case MGMT_LTK_P256_DEBUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) authenticated = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) type = SMP_LTK_P256_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) hci_add_ltk(hdev, &key->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) le_addr_type(key->addr.type), type, authenticated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) key->val, key->enc_size, key->ediv, key->rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054)
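/* Completion handler for Get Connection Info: on success the RSSI and
 * TX power values cached in the hci_conn are returned, otherwise the
 * invalid markers are used. Drops the connection references taken in
 * get_conn_info().
 */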
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) struct hci_conn *conn = cmd->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) struct mgmt_rp_get_conn_info rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) if (status == MGMT_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) rp.rssi = conn->rssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) rp.tx_power = conn->tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) rp.max_tx_power = conn->max_tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) rp.rssi = HCI_RSSI_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) rp.tx_power = HCI_TX_POWER_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) rp.max_tx_power = HCI_TX_POWER_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) status, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) hci_conn_drop(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) hci_conn_put(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081)
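/* Request callback for the Read RSSI / Read TX Power refresh issued by
 * get_conn_info().
 */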
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) struct hci_cp_read_rssi *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) u16 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) bt_dev_dbg(hdev, "status 0x%02x", hci_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) 	/* The commands sent in the request are either Read RSSI or Read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) 	 * Transmit Power Level, so check which one was sent last to retrieve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) 	 * the connection handle. Both commands take the handle as their first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) 	 * parameter, so it is safe to cast the data to the same command struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) 	 * The first command sent is always Read RSSI and we fail only if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) 	 * fails. Otherwise we simply override the error to indicate success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) 	 * since we already remembered whether the TX power value is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) if (!cp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) status = mgmt_status(hci_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) if (!cp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) handle = __le16_to_cpu(cp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) conn = hci_conn_hash_lookup_handle(hdev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) if (!conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) cmd->cmd_complete(cmd, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135)
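/* Handler for MGMT_OP_GET_CONN_INFO. Replies straight from the cached
 * values while they are still fresh, otherwise queues Read RSSI and, if
 * needed, Read TX Power commands and defers the reply to
 * conn_info_refresh_complete().
 */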
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) struct mgmt_cp_get_conn_info *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) struct mgmt_rp_get_conn_info rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) unsigned long conn_info_age;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) rp.addr.type = cp->addr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) if (!bdaddr_type_is_valid(cp->addr.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) MGMT_STATUS_NOT_POWERED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) if (cp->addr.type == BDADDR_BREDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) if (!conn || conn->state != BT_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) MGMT_STATUS_NOT_CONNECTED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) MGMT_STATUS_BUSY, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) 	/* To keep the client from guessing when to poll again, compute the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) 	 * conn info age as a random value between the min/max set in hdev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) conn_info_age = hdev->conn_info_min_age +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) prandom_u32_max(hdev->conn_info_max_age -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) hdev->conn_info_min_age);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) /* Query controller to refresh cached values if they are too old or were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) * never read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) if (time_after(jiffies, conn->conn_info_timestamp +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) msecs_to_jiffies(conn_info_age)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) !conn->conn_info_timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) struct hci_cp_read_tx_power req_txp_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) struct hci_cp_read_rssi req_rssi_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) req_rssi_cp.handle = cpu_to_le16(conn->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) &req_rssi_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) 		/* For LE links the TX power does not change, so there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) 		 * need to query for it again once its value is known.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) if (!bdaddr_type_is_le(cp->addr.type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) conn->tx_power == HCI_TX_POWER_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) req_txp_cp.handle = cpu_to_le16(conn->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) req_txp_cp.type = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) hci_req_add(&req, HCI_OP_READ_TX_POWER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) sizeof(req_txp_cp), &req_txp_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) /* Max TX power needs to be read only once per connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) req_txp_cp.handle = cpu_to_le16(conn->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) req_txp_cp.type = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) hci_req_add(&req, HCI_OP_READ_TX_POWER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) sizeof(req_txp_cp), &req_txp_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) err = hci_req_run(&req, conn_info_refresh_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) hci_conn_hold(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) cmd->user_data = hci_conn_get(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) cmd->cmd_complete = conn_info_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) conn->conn_info_timestamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) /* Cache is valid, just reply with values cached in hci_conn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) rp.rssi = conn->rssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) rp.tx_power = conn->tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) rp.max_tx_power = conn->max_tx_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256)
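/* Completion handler for Get Clock Info: fills in the local clock and,
 * when a connection was involved, the piconet clock and accuracy.
 */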
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) struct hci_conn *conn = cmd->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) struct mgmt_rp_get_clock_info rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) hdev = hci_dev_get(cmd->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) if (hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) rp.local_clock = cpu_to_le32(hdev->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) hci_dev_put(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) if (conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) rp.piconet_clock = cpu_to_le32(conn->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) rp.accuracy = cpu_to_le16(conn->clock_accuracy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) if (conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) hci_conn_drop(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) hci_conn_put(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292)
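/* Request callback for the Read Clock commands issued by
 * get_clock_info().
 */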
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) struct hci_cp_read_clock *hci_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) bt_dev_dbg(hdev, "status %u", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) if (!hci_cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) if (hci_cp->which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) u16 handle = __le16_to_cpu(hci_cp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) conn = hci_conn_hash_lookup_handle(hdev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) conn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324)
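/* Handler for MGMT_OP_GET_CLOCK_INFO. Only BR/EDR addresses are
 * accepted; the local clock is always read and the piconet clock is
 * read as well when a connected peer address is given.
 */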
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) struct mgmt_cp_get_clock_info *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) struct mgmt_rp_get_clock_info rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) struct hci_cp_read_clock hci_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) memset(&rp, 0, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) rp.addr.type = cp->addr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) if (cp->addr.type != BDADDR_BREDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) if (!hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) MGMT_STATUS_NOT_POWERED, &rp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) &cp->addr.bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) if (!conn || conn->state != BT_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) MGMT_OP_GET_CLOCK_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) MGMT_STATUS_NOT_CONNECTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) conn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) cmd->cmd_complete = clock_info_cmd_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) memset(&hci_cp, 0, sizeof(hci_cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) if (conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) hci_conn_hold(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) cmd->user_data = hci_conn_get(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) hci_cp.handle = cpu_to_le16(conn->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) hci_cp.which = 0x01; /* Piconet clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) err = hci_req_run(&req, get_clock_info_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400)
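/* Check whether an LE connection to the given address and address type
 * is currently established.
 */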
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) struct hci_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) if (!conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) if (conn->dst_type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) if (conn->state != BT_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) /* This function requires the caller holds hdev->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) u8 addr_type, u8 auto_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) struct hci_conn_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) params = hci_conn_params_add(hdev, addr, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) if (!params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) if (params->auto_connect == auto_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) 	list_del_init(&params->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) switch (auto_connect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) case HCI_AUTO_CONN_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) case HCI_AUTO_CONN_LINK_LOSS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) 		/* If auto connect is being disabled while we're trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) 		 * connect to a device, keep connecting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) if (params->explicit_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) 			list_add(&params->action, &hdev->pend_le_conns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) case HCI_AUTO_CONN_REPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) if (params->explicit_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) 			list_add(&params->action, &hdev->pend_le_conns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) 			list_add(&params->action, &hdev->pend_le_reports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) case HCI_AUTO_CONN_DIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) case HCI_AUTO_CONN_ALWAYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) if (!is_connected(hdev, addr, addr_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) 			list_add(&params->action, &hdev->pend_le_conns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) params->auto_connect = auto_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) addr, addr_type, auto_connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462)
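/* Emit MGMT_EV_DEVICE_ADDED for the newly added device. */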
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) static void device_added(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) bdaddr_t *bdaddr, u8 type, u8 action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) struct mgmt_ev_device_added ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) ev.addr.type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) ev.action = action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474)
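/* Handler for MGMT_OP_ADD_DEVICE. BR/EDR addresses (only action 0x01 is
 * accepted) are added to hdev->whitelist and the scan state is updated,
 * while LE identity addresses get connection parameters with the
 * auto-connect policy derived from the action: 0x02 maps to
 * HCI_AUTO_CONN_ALWAYS, 0x01 to HCI_AUTO_CONN_DIRECT and 0x00 to
 * HCI_AUTO_CONN_REPORT.
 */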
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) static int add_device(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) struct mgmt_cp_add_device *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) u8 auto_conn, addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) struct hci_conn_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) u32 current_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) if (!bdaddr_type_is_valid(cp->addr.type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) 		/* Only the incoming connections action is supported for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) if (cp->action != 0x01) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) MGMT_OP_ADD_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) cp->addr.type, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) hci_req_update_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) goto added;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) addr_type = le_addr_type(cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521)
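/* The Add Device action values map onto the internal auto-connect
 * policy used below: 0x00 requests background scanning with device
 * found reports only (HCI_AUTO_CONN_REPORT), 0x01 allows incoming
 * connection attempts (HCI_AUTO_CONN_DIRECT) and 0x02 requests an
 * automatic connection whenever the device is seen
 * (HCI_AUTO_CONN_ALWAYS).
 */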
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) if (cp->action == 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) auto_conn = HCI_AUTO_CONN_ALWAYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) else if (cp->action == 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) auto_conn = HCI_AUTO_CONN_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) auto_conn = HCI_AUTO_CONN_REPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) /* Kernel internally uses conn_params with resolvable private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) * address, but Add Device allows only identity addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) * Make sure it is enforced before calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) * hci_conn_params_lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) /* If the connection parameters don't exist for this device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) * they will be created and configured with defaults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) auto_conn) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) MGMT_STATUS_FAILED, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) if (params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) current_flags = params->current_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) hci_update_background_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) added:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) SUPPORTED_DEVICE_FLAGS(), current_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) MGMT_STATUS_SUCCESS, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) static void device_removed(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) bdaddr_t *bdaddr, u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) struct mgmt_ev_device_removed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) ev.addr.type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) static int remove_device(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) struct mgmt_cp_remove_device *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) struct hci_conn_params *params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) u8 addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) if (!bdaddr_type_is_valid(cp->addr.type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) if (cp->addr.type == BDADDR_BREDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) err = hci_bdaddr_list_del(&hdev->whitelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) hci_req_update_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) device_removed(sk, hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) addr_type = le_addr_type(cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) /* Kernel internally uses conn_params with resolvable private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) * address, but Remove Device allows only identity addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) * Make sure it is enforced before calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) * hci_conn_params_lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) if (!params) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) list_del(&params->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) list_del(&params->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) kfree(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) hci_update_background_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) struct hci_conn_params *p, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) struct bdaddr_list *b, *btmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) if (cp->addr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) MGMT_STATUS_INVALID_PARAMS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) &cp->addr, sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677)
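/* Wildcard removal (BDADDR_ANY): flush the whole BR/EDR whitelist
 * and all LE connection parameters, except that disabled entries are
 * left untouched and entries with a pending explicit connect are kept
 * and downgraded to HCI_AUTO_CONN_EXPLICIT so the pending connection
 * attempt is not torn down.
 */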
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) list_del(&b->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) kfree(b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) hci_req_update_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) device_removed(sk, hdev, &p->addr, p->addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) if (p->explicit_connect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) list_del(&p->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) list_del(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) bt_dev_dbg(hdev, "All LE connection parameters were removed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) hci_update_background_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) MGMT_STATUS_SUCCESS, &cp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) sizeof(cp->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) struct mgmt_cp_load_conn_param *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) sizeof(struct mgmt_conn_param));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) u16 param_count, expected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) param_count = __le16_to_cpu(cp->param_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) if (param_count > max_param_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) param_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) expected_len = struct_size(cp, params, param_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) if (expected_len != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) expected_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) }
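/* The length checks above assume the Load Connection Parameters
 * packet layout: a fixed header followed by param_count entries, i.e.
 *
 *   expected_len = sizeof(*cp) + param_count * sizeof(struct mgmt_conn_param)
 *
 * which is what struct_size() evaluates to here, while max_param_count
 * bounds param_count so that this total still fits into the u16 length.
 */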
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) bt_dev_dbg(hdev, "param_count %u", param_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) hci_conn_params_clear_disabled(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) for (i = 0; i < param_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) struct mgmt_conn_param *param = &cp->params[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) struct hci_conn_params *hci_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) u16 min, max, latency, timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) u8 addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) param->addr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) if (param->addr.type == BDADDR_LE_PUBLIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) addr_type = ADDR_LE_DEV_PUBLIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) } else if (param->addr.type == BDADDR_LE_RANDOM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) addr_type = ADDR_LE_DEV_RANDOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) bt_dev_err(hdev, "ignoring invalid connection parameters");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) min = le16_to_cpu(param->min_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) max = le16_to_cpu(param->max_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) latency = le16_to_cpu(param->latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) timeout = le16_to_cpu(param->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) min, max, latency, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) if (hci_check_conn_params(min, max, latency, timeout) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) bt_dev_err(hdev, "ignoring invalid connection parameters");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) if (!hci_param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) bt_dev_err(hdev, "failed to add connection parameters");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) hci_param->conn_min_interval = min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) hci_param->conn_max_interval = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) hci_param->conn_latency = latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) hci_param->supervision_timeout = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) static int set_external_config(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) struct mgmt_cp_set_external_config *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) if (hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) if (cp->config != 0x00 && cp->config != 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) if (cp->config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) if (!changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) err = new_options(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) mgmt_index_removed(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) hci_dev_set_flag(hdev, HCI_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) hci_dev_set_flag(hdev, HCI_AUTO_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) queue_work(hdev->req_workqueue, &hdev->power_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) set_bit(HCI_RAW, &hdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) mgmt_index_added(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) static int set_public_address(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) void *data, u16 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) struct mgmt_cp_set_public_address *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) if (hdev_is_powered(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) if (!bacmp(&cp->bdaddr, BDADDR_ANY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) if (!hdev->set_bdaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) bacpy(&hdev->public_addr, &cp->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) if (!changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) err = new_options(hdev, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) if (is_configured(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) mgmt_index_removed(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) hci_dev_set_flag(hdev, HCI_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) hci_dev_set_flag(hdev, HCI_AUTO_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) queue_work(hdev->req_workqueue, &hdev->power_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) u16 opcode, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) u8 *h192, *r192, *h256, *r256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) u16 eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) bt_dev_dbg(hdev, "status %u", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) mgmt_cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) status = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) h192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) r192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) h256 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) r256 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) struct hci_rp_read_local_oob_data *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) if (skb->len != sizeof(*rp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) rp = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) eir_len = 5 + 18 + 18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) h192 = rp->hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) r192 = rp->rand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) h256 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) r256 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) struct hci_rp_read_local_oob_ext_data *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) if (skb->len != sizeof(*rp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) rp = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) eir_len = 5 + 18 + 18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) h192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) r192 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) eir_len = 5 + 18 + 18 + 18 + 18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) h192 = rp->hash192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) r192 = rp->rand192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) h256 = rp->hash256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) r256 = rp->rand256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) }
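/* The eir_len values above follow the EIR encoding produced by
 * eir_append_data(): each field costs one length byte and one type
 * byte plus its payload, i.e. 5 bytes for the 3-byte Class of Device
 * and 18 bytes for each 16-byte hash or randomizer value.
 */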
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) if (!mgmt_rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) goto send_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) if (h192 && r192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) eir_len = eir_append_data(mgmt_rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) EIR_SSP_HASH_C192, h192, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) eir_len = eir_append_data(mgmt_rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) EIR_SSP_RAND_R192, r192, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) if (h256 && r256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) eir_len = eir_append_data(mgmt_rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) EIR_SSP_HASH_C256, h256, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) eir_len = eir_append_data(mgmt_rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) EIR_SSP_RAND_R256, r256, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) send_rsp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) mgmt_rp->type = mgmt_cp->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) mgmt_rp->eir_len = cpu_to_le16(eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) err = mgmt_cmd_complete(cmd->sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) mgmt_rp, sizeof(*mgmt_rp) + eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) if (err < 0 || status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) mgmt_rp, sizeof(*mgmt_rp) + eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) kfree(mgmt_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) struct mgmt_cp_read_local_oob_ext_data *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) cp, sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) if (bredr_sc_enabled(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) struct mgmt_cp_read_local_oob_ext_data *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) struct mgmt_rp_read_local_oob_ext_data *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) size_t rp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) u16 eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) u8 status, flags, role, addr[7], hash[16], rand[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) if (hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) switch (cp->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) case BIT(BDADDR_BREDR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) status = mgmt_bredr_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) eir_len = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) status = mgmt_le_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) eir_len = 9 + 3 + 18 + 18 + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) status = MGMT_STATUS_INVALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) status = MGMT_STATUS_NOT_POWERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) }
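/* These worst-case eir_len reservations mirror the fields appended
 * further below, each taking a length byte and a type byte plus its
 * payload: 5 for the 3-byte Class of Device in the BR/EDR case, and
 * for LE 9 (7-byte device address), 3 (role), 18 + 18 (SC confirmation
 * and random values) and 3 (flags).
 */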
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) rp_len = sizeof(*rp) + eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) rp = kmalloc(rp_len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) if (!rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) switch (cp->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) case BIT(BDADDR_BREDR):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) err = read_local_ssp_oob_req(hdev, sk, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) eir_len = eir_append_data(rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) EIR_CLASS_OF_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) hdev->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) smp_generate_oob(hdev, hash, rand) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) /* This should return the active RPA, but since the RPA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) * is only programmed on demand, it is really hard to fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) * this in at the moment. For now disallow retrieving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) * local out-of-band data when privacy is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) * Returning the identity address will not help here since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) * pairing happens before the identity resolving key is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) * known and thus the connection establishment happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) * based on the RPA and not the identity address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) status = MGMT_STATUS_REJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) bacmp(&hdev->static_addr, BDADDR_ANY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) memcpy(addr, &hdev->static_addr, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) addr[6] = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) memcpy(addr, &hdev->bdaddr, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) addr[6] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) }
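/* addr[] now holds the 7-byte LE Bluetooth Device Address payload:
 * the 6-byte address followed by an address type byte, 0x00 for the
 * public address and 0x01 for the static random address.
 */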
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) addr, sizeof(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) role = 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) role = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) &role, sizeof(role));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) eir_len = eir_append_data(rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) EIR_LE_SC_CONFIRM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) hash, sizeof(hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) eir_len = eir_append_data(rp->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) EIR_LE_SC_RANDOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) rand, sizeof(rand));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) flags = mgmt_get_adv_discov_flags(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) flags |= LE_AD_NO_BREDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) &flags, sizeof(flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) status = MGMT_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) rp->type = cp->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) rp->eir_len = cpu_to_le16(eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) status, rp, sizeof(*rp) + eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) if (err < 0 || status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) rp, sizeof(*rp) + eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) HCI_MGMT_OOB_DATA_EVENTS, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) kfree(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) static u32 get_supported_adv_flags(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) flags |= MGMT_ADV_FLAG_CONNECTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) flags |= MGMT_ADV_FLAG_DISCOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) flags |= MGMT_ADV_FLAG_APPEARANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) flags |= MGMT_ADV_FLAG_LOCAL_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) /* In extended advertising, the TX_POWER returned from Set Adv Param
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) * will always be valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) ext_adv_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) flags |= MGMT_ADV_FLAG_TX_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) flags |= MGMT_ADV_FLAG_SEC_1M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) if (hdev->le_features[1] & HCI_LE_PHY_2M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) flags |= MGMT_ADV_FLAG_SEC_2M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) if (hdev->le_features[1] & HCI_LE_PHY_CODED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) flags |= MGMT_ADV_FLAG_SEC_CODED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) struct mgmt_rp_read_adv_features *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) size_t rp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) struct adv_info *adv_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) u32 supported_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) u8 *instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) /* Enabling the experimental LL Privacy support disables support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) * advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258)
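/* The reply carries one byte (the instance identifier) per configured
 * advertising instance, hence adv_instance_cnt extra bytes on top of
 * the fixed reply structure.
 */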
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) rp = kmalloc(rp_len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) if (!rp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) supported_flags = get_supported_adv_flags(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) rp->supported_flags = cpu_to_le32(supported_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) rp->max_instances = hdev->le_num_of_adv_sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) rp->num_instances = hdev->adv_instance_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) instance = rp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) *instance = adv_instance->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) instance++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) MGMT_STATUS_SUCCESS, rp, rp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) kfree(rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) static u8 calculate_name_len(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) return append_local_name(hdev, buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296)
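/* Worst-case space left for caller supplied data once the fields
 * managed by the kernel are reserved: 3 bytes for a Flags field and
 * 3 bytes for a TX power field in the advertising data, and the
 * encoded local name (calculate_name_len()) plus 4 bytes for a 2-byte
 * Appearance value in the scan response.
 */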
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) bool is_adv_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) u8 max_len = HCI_MAX_AD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) if (is_adv_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) MGMT_ADV_FLAG_LIMITED_DISCOV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) MGMT_ADV_FLAG_MANAGED_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) max_len -= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) max_len -= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) max_len -= calculate_name_len(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) max_len -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) return max_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320)
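/* Helpers telling whether a given AD field is managed by the kernel for
 * the requested adv_flags, in which case user-supplied data must not
 * contain it.
 */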
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) static bool flags_managed(u32 adv_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) return adv_flags & (MGMT_ADV_FLAG_DISCOV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) MGMT_ADV_FLAG_LIMITED_DISCOV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) MGMT_ADV_FLAG_MANAGED_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) static bool tx_power_managed(u32 adv_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) return adv_flags & MGMT_ADV_FLAG_TX_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) static bool name_managed(u32 adv_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) static bool appearance_managed(u32 adv_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342)
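/* Validate user-supplied advertising or scan response data.
 *
 * The data must fit within the budget computed by tlv_data_max_len() and
 * must consist of well-formed AD structures, e.g.:
 *
 *   02 01 06      - length 0x02, type 0x01 (Flags), value 0x06
 *   03 03 12 18   - length 0x03, type 0x03 (16-bit UUIDs), UUID 0x1812
 *
 * Structures for fields the kernel manages (flags, TX power, local name,
 * appearance) are rejected when the corresponding MGMT_ADV_FLAG_* bit is
 * set; a Flags structure is additionally never allowed in scan response
 * data.
 */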
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) u8 len, bool is_adv_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) int i, cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) u8 max_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) if (len > max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) /* Make sure that the data is correctly formatted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) cur_len = data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) if (!cur_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) if (data[i + 1] == EIR_FLAGS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) (!is_adv_data || flags_managed(adv_flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) if (data[i + 1] == EIR_APPEARANCE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) appearance_managed(adv_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) /* If the current field length would exceed the total data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) * length, then it's invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) if (i + cur_len >= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387)
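/* Request completion callback for Add Advertising: on failure, remove any
 * instances that were still marked as pending, then respond to the pending
 * command with the translated status.
 */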
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) static void add_advertising_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) struct mgmt_cp_add_advertising *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) struct mgmt_rp_add_advertising rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) struct adv_info *adv_instance, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) u8 instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) bt_dev_dbg(hdev, "status %d", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) if (!adv_instance->pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) adv_instance->pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) instance = adv_instance->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) if (hdev->cur_adv_instance == instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) cancel_adv_timeout(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) hci_remove_adv_instance(hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) rp.instance = cp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) mgmt_status(status), &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439)
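/* Add Advertising command handler: validate the instance number, flags and
 * TLV payloads, register (or update) the advertising instance and, when
 * there is HCI work to do, schedule it through a request that completes in
 * add_advertising_complete().
 */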
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) static int add_advertising(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) struct mgmt_cp_add_advertising *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) struct mgmt_rp_add_advertising rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) u32 supported_flags, phy_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) u16 timeout, duration;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) u8 schedule_instance = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) struct adv_info *next_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) status = mgmt_le_support(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462)
	/* Enabling the experimental LL Privacy support disables support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) * advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) flags = __le32_to_cpu(cp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) timeout = __le16_to_cpu(cp->timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) duration = __le16_to_cpu(cp->duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481)
	/* The current implementation only supports a subset of the specified
	 * flags. We also need to check that the secondary channel (PHY)
	 * flags are mutually exclusive.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) supported_flags = get_supported_adv_flags(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) if (flags & ~supported_flags ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) if (timeout && !hdev_is_powered(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) pending_find(MGMT_OP_SET_LE, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) cp->scan_rsp_len, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) err = hci_add_adv_instance(hdev, cp->instance, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) cp->adv_data_len, cp->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) cp->scan_rsp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) cp->data + cp->adv_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) timeout, duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) MGMT_STATUS_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) /* Only trigger an advertising added event if a new instance was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) * actually added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) if (hdev->adv_instance_cnt > prev_instance_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) mgmt_advertising_added(sk, hdev, cp->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) if (hdev->cur_adv_instance == cp->instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) /* If the currently advertised instance is being changed then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) * cancel the current advertising and schedule the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) * instance. If there is only one instance then the overridden
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) * advertising data will be visible right away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) cancel_adv_timeout(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) next_instance = hci_get_next_instance(hdev, cp->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) if (next_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) schedule_instance = next_instance->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) } else if (!hdev->adv_instance_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) /* Immediately advertise the new instance if no other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) * instance is currently being advertised.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) schedule_instance = cp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) /* If the HCI_ADVERTISING flag is set or the device isn't powered or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) * there is no instance to be advertised then we have no HCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) * communication to make. Simply return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) if (!hdev_is_powered(hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) !schedule_instance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) rp.instance = cp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) /* We're good to go, update advertising data, parameters, and start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) * advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) err = hci_req_run(&req, add_advertising_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) MGMT_STATUS_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) u16 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) struct mgmt_cp_remove_advertising *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) struct mgmt_rp_remove_advertising rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) bt_dev_dbg(hdev, "status %d", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) /* A failure status here only means that we failed to disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) * advertising. Otherwise, the advertising instance has been removed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) * so report success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612) cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) rp.instance = cp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622)
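/* Remove Advertising command handler: instance 0 removes all instances.
 * The selected instance(s) are cleared and, if none remain, advertising
 * is disabled; the HCI request completes in remove_advertising_complete().
 */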
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) struct mgmt_cp_remove_advertising *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) struct mgmt_rp_remove_advertising rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633)
	/* Enabling the experimental LL Privacy support disables support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) * advertising.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) err = mgmt_cmd_status(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) MGMT_OP_REMOVE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650) if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) pending_find(MGMT_OP_SET_LE, hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) MGMT_STATUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) if (list_empty(&hdev->adv_instances)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665)
	/* If we use extended advertising, the instance is disabled and removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) if (ext_adv_capable(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) __hci_req_disable_ext_adv_instance(&req, cp->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) __hci_req_remove_ext_adv_instance(&req, cp->instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) if (list_empty(&hdev->adv_instances))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) __hci_req_disable_advertising(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) /* If no HCI commands have been collected so far or the HCI_ADVERTISING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) * flag is set or the device isn't powered then we have no HCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) * communication to make. Simply return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) if (skb_queue_empty(&req.cmd_q) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) !hdev_is_powered(hdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) hci_req_purge(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) rp.instance = cp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) err = mgmt_cmd_complete(sk, hdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) MGMT_OP_REMOVE_ADVERTISING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) err = hci_req_run(&req, remove_advertising_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708)
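/* Get Advertising Size Information command handler: report the maximum
 * advertising data and scan response lengths available for the given
 * instance and flags without changing any state.
 */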
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) void *data, u16 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) struct mgmt_cp_get_adv_size_info *cp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) struct mgmt_rp_get_adv_size_info rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) u32 flags, supported_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) bt_dev_dbg(hdev, "sock %p", sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) if (!lmp_le_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) MGMT_STATUS_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) flags = __le32_to_cpu(cp->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) /* The current implementation only supports a subset of the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) * flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732) supported_flags = get_supported_adv_flags(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) if (flags & ~supported_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) MGMT_STATUS_INVALID_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737) rp.instance = cp->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738) rp.flags = cp->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740) rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747)
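/* Handler table indexed by management opcode: the entry order must match
 * the MGMT_OP_* numbering, with index 0 reserved for the non-existent
 * command 0x0000.
 */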
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) static const struct hci_mgmt_handler mgmt_handlers[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) { NULL }, /* 0x0000 (no command) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) { read_version, MGMT_READ_VERSION_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751) HCI_MGMT_NO_HDEV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) { read_commands, MGMT_READ_COMMANDS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) HCI_MGMT_NO_HDEV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) HCI_MGMT_NO_HDEV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) { read_controller_info, MGMT_READ_INFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761) { set_powered, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762) { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) { set_connectable, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) { set_fast_connectable, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) { set_bondable, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) { set_link_security, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767) { set_ssp, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) { set_hs, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) { set_le, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772) { add_uuid, MGMT_ADD_UUID_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) { remove_uuid, MGMT_REMOVE_UUID_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) { disconnect, MGMT_DISCONNECT_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) { get_connections, MGMT_GET_CONNECTIONS_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780) { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781) { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) { pair_device, MGMT_PAIR_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787) { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) { start_discovery, MGMT_START_DISCOVERY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) { confirm_name, MGMT_CONFIRM_NAME_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) { block_device, MGMT_BLOCK_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799) { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) { set_advertising, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) { set_bredr, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803) { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) { set_secure_conn, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) { set_debug_keys, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806) { set_privacy, MGMT_SET_PRIVACY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) { load_irks, MGMT_LOAD_IRKS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810) { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) { add_device, MGMT_ADD_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) { remove_device, MGMT_REMOVE_DEVICE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815) { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) HCI_MGMT_NO_HDEV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) HCI_MGMT_UNCONFIGURED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) HCI_MGMT_UNCONFIGURED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824) HCI_MGMT_UNCONFIGURED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) HCI_MGMT_NO_HDEV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835) { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839) { set_appearance, MGMT_SET_APPEARANCE_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) { set_wideband_speech, MGMT_SETTING_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845) { read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) HCI_MGMT_UNTRUSTED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849) HCI_MGMT_HDEV_OPTIONAL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) HCI_MGMT_VAR_LEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) HCI_MGMT_HDEV_OPTIONAL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858) HCI_MGMT_UNTRUSTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861) { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862) { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) HCI_MGMT_VAR_LEN },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868)
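/* Called when a new controller index is registered: emit the appropriate
 * (Unconfigured) Index Added event for primary controllers and an
 * Extended Index Added event carrying the controller type and bus.
 */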
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) void mgmt_index_added(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871) struct mgmt_ev_ext_index ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) switch (hdev->dev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877) case HCI_PRIMARY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880) NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881) ev.type = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) HCI_MGMT_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) ev.type = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) case HCI_AMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) ev.type = 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895) ev.bus = hdev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897) mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) HCI_MGMT_EXT_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900)
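/* Called when a controller index is unregistered: for primary controllers
 * any pending commands are failed with Invalid Index, then the matching
 * Index Removed events are emitted.
 */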
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) void mgmt_index_removed(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) struct mgmt_ev_ext_index ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) u8 status = MGMT_STATUS_INVALID_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906) if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) switch (hdev->dev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) case HCI_PRIMARY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911) mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914) mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) ev.type = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) HCI_MGMT_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920) ev.type = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923) case HCI_AMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) ev.type = 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930) ev.bus = hdev->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) HCI_MGMT_EXT_INDEX_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) /* This function requires the caller holds hdev->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) static void restart_le_actions(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939) struct hci_conn_params *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might not
		 * have "really" been powered off.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) list_del_init(&p->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947) switch (p->auto_connect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) case HCI_AUTO_CONN_DIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949) case HCI_AUTO_CONN_ALWAYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) list_add(&p->action, &hdev->pend_le_conns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952) case HCI_AUTO_CONN_REPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) list_add(&p->action, &hdev->pend_le_reports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7960)
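/* Called when the power-on procedure has finished: on success re-arm the
 * stored LE connection parameters and update background scanning, then
 * answer pending Set Powered commands and emit New Settings.
 */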
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7961) void mgmt_power_on(struct hci_dev *hdev, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) struct cmd_lookup match = { NULL, hdev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965) bt_dev_dbg(hdev, "err %d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) hci_dev_lock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) restart_le_actions(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971) hci_update_background_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974) mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976) new_settings(hdev, match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) hci_dev_unlock(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7983)
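/* Power-off counterpart of mgmt_power_on(): fail pending commands with
 * Not Powered (or Invalid Index when the device is being unregistered),
 * report a cleared class of device if needed and emit New Settings.
 */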
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7984) void __mgmt_power_off(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) struct cmd_lookup match = { NULL, hdev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987) u8 status, zero_cod[] = { 0, 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989) mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990)
	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994) * mgmt_index_removed() any hci_conn callbacks will have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995) * been triggered, potentially causing misleading DISCONNECTED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) * status responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998) if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7999) status = MGMT_STATUS_INVALID_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8000) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8001) status = MGMT_STATUS_NOT_POWERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8003) mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) zero_cod, sizeof(zero_cod),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008) HCI_MGMT_DEV_CLASS_EVENTS, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) ext_info_changed(hdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) new_settings(hdev, match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017)
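/* Called when powering on the controller failed: answer the pending Set
 * Powered command with Rfkilled or Failed depending on the error.
 */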
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8023) cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8024) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8025) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027) if (err == -ERFKILL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028) status = MGMT_STATUS_RFKILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030) status = MGMT_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032) mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036)
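/* Emit the New Link Key event for a BR/EDR link key; store_hint tells
 * userspace whether the key should be stored persistently.
 */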
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8038) bool persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) struct mgmt_ev_new_link_key ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044) ev.store_hint = persistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) ev.key.addr.type = BDADDR_BREDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047) ev.key.type = key->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8048) memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049) ev.key.pin_len = key->pin_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8051) mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8053)
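/* Map an SMP long term key type to the corresponding mgmt LTK type,
 * taking the authenticated (MITM protected) state into account.
 */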
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8054) static u8 mgmt_ltk_type(struct smp_ltk *ltk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8056) switch (ltk->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8057) case SMP_LTK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8058) case SMP_LTK_SLAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8059) if (ltk->authenticated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8060) return MGMT_LTK_AUTHENTICATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8061) return MGMT_LTK_UNAUTHENTICATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8062) case SMP_LTK_P256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8063) if (ltk->authenticated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8064) return MGMT_LTK_P256_AUTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8065) return MGMT_LTK_P256_UNAUTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8066) case SMP_LTK_P256_DEBUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8067) return MGMT_LTK_P256_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8070) return MGMT_LTK_UNAUTHENTICATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8073) void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8075) struct mgmt_ev_new_long_term_key ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8077) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8078)
	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their long term keys to be stored. Their addresses will
	 * change the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * should the long term key be stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8090) if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8091) (key->bdaddr.b[5] & 0xc0) != 0xc0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8092) ev.store_hint = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8093) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8094) ev.store_hint = persistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8096) bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8097) ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8098) ev.key.type = mgmt_ltk_type(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8099) ev.key.enc_size = key->enc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8100) ev.key.ediv = key->ediv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8101) ev.key.rand = key->rand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8103) if (key->type == SMP_LTK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8104) ev.key.master = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8106) /* Make sure we copy only the significant bytes based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8107) * encryption key size, and set the rest of the value to zeroes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8109) memcpy(ev.key.val, key->val, key->enc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8110) memset(ev.key.val + key->enc_size, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8111) sizeof(ev.key.val) - key->enc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8113) mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8115)
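/* Illustrative sketch (not part of this file's build): the store_hint
 * checks above and in mgmt_new_csrk() rely on an LE static random address
 * having the two most significant bits of its most significant byte
 * (b[5], since bdaddr_t is stored little endian) set to 0b11, while
 * resolvable (0b01) and non-resolvable (0b00) private addresses do not.
 * The helper name below is hypothetical.
 *
 *	static bool example_is_static_random_addr(const bdaddr_t *addr)
 *	{
 *		return (addr->b[5] & 0xc0) == 0xc0;
 *	}
 */
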
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8116) void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8118) struct mgmt_ev_new_irk ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8120) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8122) ev.store_hint = persistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8124) bacpy(&ev.rpa, &irk->rpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8125) bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8126) ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8127) memcpy(ev.irk.val, irk->val, sizeof(irk->val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8129) mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8132) void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8133) bool persistent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8135) struct mgmt_ev_new_csrk ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8137) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8139) /* Devices using resolvable or non-resolvable random addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8140) * without providing an identity resolving key don't need their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8141) * signature resolving keys stored. Their addresses will change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8142) * the next time around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8144) * Make sure the signature resolving key is stored only when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8145) * the remote device provides an identity address. So allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8146) * static random and public addresses here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8148) if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8149) (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8150) ev.store_hint = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8151) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8152) ev.store_hint = persistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8154) bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8155) ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8156) ev.key.type = csrk->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8157) memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8159) mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8162) void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8163) u8 bdaddr_type, u8 store_hint, u16 min_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8164) u16 max_interval, u16 latency, u16 timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8166) struct mgmt_ev_new_conn_param ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8168) if (!hci_is_identity_address(bdaddr, bdaddr_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8169) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8171) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8172) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8173) ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8174) ev.store_hint = store_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8175) ev.min_interval = cpu_to_le16(min_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8176) ev.max_interval = cpu_to_le16(max_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8177) ev.latency = cpu_to_le16(latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8178) ev.timeout = cpu_to_le16(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8180) mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8182)
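/* Illustrative sketch (not part of this file's build): all multi-byte
 * fields in the New Connection Parameter event built above are little
 * endian on the wire and use the usual HCI units (interval in 1.25 ms
 * steps, timeout in 10 ms steps, latency as a number of connection
 * events). A consumer of the event might convert them roughly like
 * this; the struct and helper names are hypothetical.
 *
 *	struct example_conn_params {
 *		unsigned int min_interval_us;
 *		unsigned int max_interval_us;
 *		unsigned int latency_events;
 *		unsigned int timeout_ms;
 *	};
 *
 *	static void example_decode(const struct mgmt_ev_new_conn_param *ev,
 *				   struct example_conn_params *out)
 *	{
 *		out->min_interval_us = le16_to_cpu(ev->min_interval) * 1250;
 *		out->max_interval_us = le16_to_cpu(ev->max_interval) * 1250;
 *		out->latency_events  = le16_to_cpu(ev->latency);
 *		out->timeout_ms      = le16_to_cpu(ev->timeout) * 10;
 *	}
 */
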
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8183) void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8184) u32 flags, u8 *name, u8 name_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8186) char buf[512];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8187) struct mgmt_ev_device_connected *ev = (void *) buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8188) u16 eir_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8190) bacpy(&ev->addr.bdaddr, &conn->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8191) ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8193) ev->flags = __cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8195) /* We must ensure that the EIR Data fields are ordered and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8196) * unique. Keep it simple for now and avoid the problem by not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8197) * adding any BR/EDR data to the LE adv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8199) if (conn->le_adv_data_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8200) memcpy(&ev->eir[eir_len],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8201) conn->le_adv_data, conn->le_adv_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8202) eir_len = conn->le_adv_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8203) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8204) if (name_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8205) eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8206) name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8208) if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8209) eir_len = eir_append_data(ev->eir, eir_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8210) EIR_CLASS_OF_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8211) conn->dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8214) ev->eir_len = cpu_to_le16(eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8216) mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8217) sizeof(*ev) + eir_len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8219)
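/* Illustrative sketch (not part of this file's build): the EIR blob built
 * above uses the standard EIR/AD TLV layout, where each field is a length
 * byte (counting the type byte plus the data), a type byte, and then the
 * data itself. A minimal append helper in the spirit of eir_append_data()
 * could look like this; the name is hypothetical.
 *
 *	static u16 example_eir_append(u8 *eir, u16 eir_len, u8 type,
 *				      const u8 *data, u8 data_len)
 *	{
 *		eir[eir_len++] = data_len + 1;	// length covers type + data
 *		eir[eir_len++] = type;		// e.g. EIR_NAME_COMPLETE
 *		memcpy(&eir[eir_len], data, data_len);
 *		return eir_len + data_len;
 *	}
 */
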
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8220) static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8222) struct sock **sk = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8224) cmd->cmd_complete(cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8226) *sk = cmd->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8227) sock_hold(*sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8229) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8232) static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8234) struct hci_dev *hdev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8235) struct mgmt_cp_unpair_device *cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8237) device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8239) cmd->cmd_complete(cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8240) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8242)
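/* Return true if a Set Powered (off) command is currently pending for
 * hdev, i.e. the controller is in the process of being powered down.
 */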
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8243) bool mgmt_powering_down(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8245) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8246) struct mgmt_mode *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8248) cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8249) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8250) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8252) cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8253) if (!cp->val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8254) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8256) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8259) void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8260) u8 link_type, u8 addr_type, u8 reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8261) bool mgmt_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8263) struct mgmt_ev_device_disconnected ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8264) struct sock *sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8266) /* The connection is still in hci_conn_hash so test for 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8267) * instead of 0 to know if this is the last one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8269) if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8270) cancel_delayed_work(&hdev->power_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8271) queue_work(hdev->req_workqueue, &hdev->power_off.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8274) if (!mgmt_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8275) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8277) if (link_type != ACL_LINK && link_type != LE_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8278) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8280) mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8282) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8283) ev.addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8284) ev.reason = reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8286) /* Report disconnects due to suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8287) if (hdev->suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8288) ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8290) mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8292) if (sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8293) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8295) mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8296) hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8299) void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8300) u8 link_type, u8 addr_type, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8302) u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8303) struct mgmt_cp_disconnect *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8304) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8306) mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8307) hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8309) cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8310) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8311) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8313) cp = cmd->param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8315) if (bacmp(bdaddr, &cp->addr.bdaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8316) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8318) if (cp->addr.type != bdaddr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8319) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8321) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8322) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8325) void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8326) u8 addr_type, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8328) struct mgmt_ev_connect_failed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8330) /* The connection is still in hci_conn_hash so test for 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8331) * instead of 0 to know if this is the last one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8333) if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8334) cancel_delayed_work(&hdev->power_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8335) queue_work(hdev->req_workqueue, &hdev->power_off.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8338) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8339) ev.addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8340) ev.status = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8342) mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8345) void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8347) struct mgmt_ev_pin_code_request ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8349) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8350) ev.addr.type = BDADDR_BREDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8351) ev.secure = secure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8353) mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8356) void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8357) u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8359) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8361) cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8362) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8363) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8365) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8366) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8369) void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8370) u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8372) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8374) cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8375) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8376) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8378) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8379) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8382) int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8383) u8 link_type, u8 addr_type, u32 value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8384) u8 confirm_hint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8386) struct mgmt_ev_user_confirm_request ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8388) bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8390) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8391) ev.addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8392) ev.confirm_hint = confirm_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8393) ev.value = cpu_to_le32(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8395) return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8396) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8399) int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8400) u8 link_type, u8 addr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8402) struct mgmt_ev_user_passkey_request ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8404) bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8406) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8407) ev.addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8409) return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8410) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8413) static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8414) u8 link_type, u8 addr_type, u8 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8415) u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8417) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8419) cmd = pending_find(opcode, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8420) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8421) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8423) cmd->cmd_complete(cmd, mgmt_status(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8424) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8426) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8429) int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8430) u8 link_type, u8 addr_type, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8432) return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8433) status, MGMT_OP_USER_CONFIRM_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8436) int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8437) u8 link_type, u8 addr_type, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8439) return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8440) status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8441) MGMT_OP_USER_CONFIRM_NEG_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8444) int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8445) u8 link_type, u8 addr_type, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8447) return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8448) status, MGMT_OP_USER_PASSKEY_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8451) int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8452) u8 link_type, u8 addr_type, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8454) return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8455) status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8456) MGMT_OP_USER_PASSKEY_NEG_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8459) int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8460) u8 link_type, u8 addr_type, u32 passkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8461) u8 entered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8463) struct mgmt_ev_passkey_notify ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8465) bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8467) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8468) ev.addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8469) ev.passkey = __cpu_to_le32(passkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8470) ev.entered = entered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8472) return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8475) void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8477) struct mgmt_ev_auth_failed ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8478) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8479) u8 status = mgmt_status(hci_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8481) bacpy(&ev.addr.bdaddr, &conn->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8482) ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8483) ev.status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8485) cmd = find_pairing(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8487) mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8488) cmd ? cmd->sk : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8490) if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8491) cmd->cmd_complete(cmd, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8492) mgmt_pending_remove(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8496) void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8498) struct cmd_lookup match = { NULL, hdev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8499) bool changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8501) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8502) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8503) mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8504) cmd_status_rsp, &mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8505) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8508) if (test_bit(HCI_AUTH, &hdev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8509) changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8510) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8511) changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8513) mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8514) &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8516) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8517) new_settings(hdev, match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8519) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8520) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8523) static void clear_eir(struct hci_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8525) struct hci_dev *hdev = req->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8526) struct hci_cp_write_eir cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8528) if (!lmp_ext_inq_capable(hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8529) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8531) memset(hdev->eir, 0, sizeof(hdev->eir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8533) memset(&cp, 0, sizeof(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8535) hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8538) void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8540) struct cmd_lookup match = { NULL, hdev };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8541) struct hci_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8542) bool changed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8544) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8545) u8 mgmt_err = mgmt_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8547) if (enable && hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8548) HCI_SSP_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8549) hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8550) new_settings(hdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8553) mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8554) &mgmt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8555) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8558) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8559) changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8560) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8561) changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8562) if (!changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8563) changed = hci_dev_test_and_clear_flag(hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8564) HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8565) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8566) hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8569) mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8571) if (changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8572) new_settings(hdev, match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8574) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8575) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8577) hci_req_init(&req, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8579) if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8580) if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8581) hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8582) sizeof(enable), &enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8583) __hci_req_update_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8584) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8585) clear_eir(&req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8588) hci_req_run(&req, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8590)
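/* mgmt_pending_foreach() helper: remember (and take a reference on) the
 * socket of the first matching pending command in the cmd_lookup data.
 */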
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8591) static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8593) struct cmd_lookup *match = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8595) if (match->sk == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8596) match->sk = cmd->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8597) sock_hold(match->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8601) void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8602) u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8604) struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8606) mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8607) mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8608) mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8610) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8611) mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8612) 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8613) ext_info_changed(hdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8616) if (match.sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8617) sock_put(match.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8620) void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8622) struct mgmt_cp_set_local_name ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8623) struct mgmt_pending_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8625) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8626) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8628) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8629) memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8630) memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8632) cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8633) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8634) memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8636) /* If this is an HCI command related to powering on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8637) * HCI dev, don't send any mgmt signals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8638) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8639) if (pending_find(MGMT_OP_SET_POWERED, hdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8643) mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8644) HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8645) ext_info_changed(hdev, cmd ? cmd->sk : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8648) static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8650) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8652) for (i = 0; i < uuid_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8653) if (!memcmp(uuid, uuids[i], 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8654) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8657) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8660) static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8662) u16 parsed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8664) while (parsed < eir_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8665) u8 field_len = eir[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8666) u8 uuid[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8667) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8669) if (field_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8672) if (eir_len - parsed < field_len + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8675) switch (eir[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8676) case EIR_UUID16_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8677) case EIR_UUID16_SOME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8678) for (i = 0; i + 3 <= field_len; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8679) memcpy(uuid, bluetooth_base_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8680) uuid[13] = eir[i + 3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8681) uuid[12] = eir[i + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8682) if (has_uuid(uuid, uuid_count, uuids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8683) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8686) case EIR_UUID32_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8687) case EIR_UUID32_SOME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8688) for (i = 0; i + 5 <= field_len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8689) memcpy(uuid, bluetooth_base_uuid, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8690) uuid[15] = eir[i + 5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8691) uuid[14] = eir[i + 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8692) uuid[13] = eir[i + 3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8693) uuid[12] = eir[i + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8694) if (has_uuid(uuid, uuid_count, uuids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8695) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8698) case EIR_UUID128_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8699) case EIR_UUID128_SOME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8700) for (i = 0; i + 17 <= field_len; i += 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8701) memcpy(uuid, eir + i + 2, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8702) if (has_uuid(uuid, uuid_count, uuids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8703) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8708) parsed += field_len + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8709) eir += field_len + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8712) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8714)
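/* Illustrative sketch (not part of this file's build): eir_has_uuids()
 * above compares everything in 128-bit form by splicing 16- and 32-bit
 * UUIDs into the Bluetooth Base UUID
 * (00000000-0000-1000-8000-00805F9B34FB, stored least significant byte
 * first in bluetooth_base_uuid). For example, a 16-bit UUID such as the
 * Audio Sink UUID 0x110B expands as follows; the helper name is
 * hypothetical.
 *
 *	static void example_uuid16_to_uuid128(u16 uuid16, u8 uuid[16])
 *	{
 *		memcpy(uuid, bluetooth_base_uuid, 16);
 *		uuid[12] = uuid16 & 0xff;	// low byte, as in the loop above
 *		uuid[13] = uuid16 >> 8;		// high byte
 *	}
 *
 * which yields 0000110B-0000-1000-8000-00805F9B34FB for uuid16 == 0x110B.
 */
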
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8715) static void restart_le_scan(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8717) /* If controller is not scanning we are done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8718) if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8719) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8721) if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8722) hdev->discovery.scan_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8723) hdev->discovery.scan_duration))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8724) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8726) queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8727) DISCOV_LE_RESTART_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8730) static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8731) u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8733) /* If an RSSI threshold has been specified, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8734) * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8735) * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8736) * is set, let it through for further processing, as we might need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8737) * restart the scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8738) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8739) * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8740) * the results are also dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8742) if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8743) (rssi == HCI_RSSI_INVALID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8744) (rssi < hdev->discovery.rssi &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8745) !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8746) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8748) if (hdev->discovery.uuid_count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8749) /* If a list of UUIDs is provided in the filter, results with no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8750) * matching UUID should be dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8751) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8752) if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8753) hdev->discovery.uuids) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8754) !eir_has_uuids(scan_rsp, scan_rsp_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8755) hdev->discovery.uuid_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8756) hdev->discovery.uuids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8757) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8760) /* If duplicate filtering does not report RSSI changes, then restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8761) * scanning to ensure results are reported with updated RSSI values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8763) if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8764) restart_le_scan(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8766) /* Validate RSSI value against the RSSI threshold once more. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8767) if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8768) rssi < hdev->discovery.rssi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8769) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8772) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8775) void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8776) u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8777) u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8779) char buf[512];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8780) struct mgmt_ev_device_found *ev = (void *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8781) size_t ev_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8783) /* Don't send events for a discovery that wasn't initiated by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8784) * kernel. The one LE exception is when pend_le_reports > 0, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8785) * which case we're doing passive scanning and do want these events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8787) if (!hci_discovery_active(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8788) if (link_type == ACL_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8789) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8790) if (link_type == LE_LINK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8791) list_empty(&hdev->pend_le_reports) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8792) !hci_is_adv_monitoring(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8793) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8797) if (hdev->discovery.result_filtering) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8798) /* We are using service discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8799) if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8800) scan_rsp_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8801) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8804) if (hdev->discovery.limited) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8805) /* Check for limited discoverable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8806) if (dev_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8807) if (!(dev_class[1] & 0x20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8808) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8809) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8810) u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8811) if (!flags || !(flags[0] & LE_AD_LIMITED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8812) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8816) /* Make sure that the buffer is big enough. The 5 extra bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8817) * are for the potential CoD field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8819) if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8820) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8822) memset(buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8824) /* In case of device discovery with BR/EDR devices (pre 1.2), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8825) * RSSI value was reported as 0 when not available. This behavior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8826) * is kept when using device discovery. This is required for full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8827) * backwards compatibility with the API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8829) * However when using service discovery, the value 127 will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8830) * returned when the RSSI is not available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8832) if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8833) link_type == ACL_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8834) rssi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8836) bacpy(&ev->addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8837) ev->addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8838) ev->rssi = rssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8839) ev->flags = cpu_to_le32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8841) if (eir_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8842) /* Copy EIR or advertising data into event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8843) memcpy(ev->eir, eir, eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8845) if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8846) NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8847) eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8848) dev_class, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8850) if (scan_rsp_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8851) /* Append scan response data to event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8852) memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8854) ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8855) ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8857) mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8859)
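/* Illustrative sketch (not part of this file's build): the Device Found
 * event built above is a fixed header followed by one variable-length
 * blob that concatenates the EIR/advertising data, an optional
 * synthesized Class of Device field (2 bytes of TLV header plus 3 bytes
 * of data, hence the "5 extra bytes" above), and any scan response data.
 * A consumer could size-check it roughly like this; the helper name is
 * hypothetical.
 *
 *	static bool example_device_found_valid(const struct mgmt_ev_device_found *ev,
 *					       size_t payload_len)
 *	{
 *		if (payload_len < sizeof(*ev))
 *			return false;
 *		return payload_len >= sizeof(*ev) + le16_to_cpu(ev->eir_len);
 *	}
 */
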
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8860) void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8861) u8 addr_type, s8 rssi, u8 *name, u8 name_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8863) struct mgmt_ev_device_found *ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8864) char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8865) u16 eir_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8867) ev = (struct mgmt_ev_device_found *) buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8869) memset(buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8871) bacpy(&ev->addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8872) ev->addr.type = link_to_bdaddr(link_type, addr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8873) ev->rssi = rssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8875) eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8876) name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8878) ev->eir_len = cpu_to_le16(eir_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8880) mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8883) void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8885) struct mgmt_ev_discovering ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8887) bt_dev_dbg(hdev, "discovering %u", discovering);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8889) memset(&ev, 0, sizeof(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8890) ev.type = hdev->discovery.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8891) ev.discovering = discovering;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8893) mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8896) void mgmt_suspending(struct hci_dev *hdev, u8 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8898) struct mgmt_ev_controller_suspend ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8900) ev.suspend_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8901) mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8904) void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8905) u8 addr_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8907) struct mgmt_ev_controller_resume ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8909) ev.wake_reason = reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8910) if (bdaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8911) bacpy(&ev.addr.bdaddr, bdaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8912) ev.addr.type = addr_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8913) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8914) memset(&ev.addr, 0, sizeof(ev.addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8917) mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8920) static struct hci_mgmt_chan chan = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8921) .channel = HCI_CHANNEL_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8922) .handler_count = ARRAY_SIZE(mgmt_handlers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8923) .handlers = mgmt_handlers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8924) .hdev_init = mgmt_init_hdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8925) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8927) int mgmt_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8929) return hci_mgmt_chan_register(&chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8932) void mgmt_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8934) hci_mgmt_chan_unregister(&chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8935) }