/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "hci_request.h"
#include "smp.h"
#include "a2mp.h"

struct sco_param {
        u16 pkt_type;
        u16 max_latency;
        u8  retrans_effort;
};

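/* Parameter tables for synchronous (SCO/eSCO) connection setup.  They are
 * ordered from the most preferred to the most conservative settings;
 * hci_setup_sync() below indexes them with (conn->attempt - 1), so every
 * retry of the setup falls back to the next entry.
 */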
static const struct sco_param esco_param_cvsd[] = {
        { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
        { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
        { EDR_ESCO_MASK | ESCO_EV3,   0x0007, 0x01 }, /* S1 */
        { EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0x01 }, /* D1 */
        { EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
        { EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0xff }, /* D1 */
        { EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
        { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
        { EDR_ESCO_MASK | ESCO_EV3,   0x0008, 0x02 }, /* T1 */
};

/* This function requires the caller holds hdev->lock */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
        struct hci_conn_params *params;
        struct hci_dev *hdev = conn->hdev;
        struct smp_irk *irk;
        bdaddr_t *bdaddr;
        u8 bdaddr_type;

        bdaddr = &conn->dst;
        bdaddr_type = conn->dst_type;

        /* Check if we need to convert to identity address */
        irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                bdaddr_type = irk->addr_type;
        }

        params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
                                           bdaddr_type);
        if (!params || !params->explicit_connect)
                return;

        /* The connection attempt was doing a scan for a new RPA and is
         * still in the scanning phase.  If the params are not associated
         * with any other autoconnect action, remove them completely.  If
         * they are, just unmark them as waiting for a connection by
         * clearing the explicit_connect field.
         */
        params->explicit_connect = false;

        list_del_init(&params->action);

        switch (params->auto_connect) {
        case HCI_AUTO_CONN_EXPLICIT:
                hci_conn_params_del(hdev, bdaddr, bdaddr_type);
                /* return instead of break to avoid duplicate scan update */
                return;
        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
                list_add(&params->action, &hdev->pend_le_conns);
                break;
        case HCI_AUTO_CONN_REPORT:
                list_add(&params->action, &hdev->pend_le_reports);
                break;
        default:
                break;
        }

        hci_update_background_scan(hdev);
}

static void hci_conn_cleanup(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
                hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

        hci_chan_list_flush(conn);

        hci_conn_hash_del(hdev, conn);

        if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                switch (conn->setting & SCO_AIRMODE_MASK) {
                case SCO_AIRMODE_CVSD:
                case SCO_AIRMODE_TRANSP:
                        if (hdev->notify)
                                hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
                        break;
                }
        } else {
                if (hdev->notify)
                        hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
        }

        hci_conn_del_sysfs(conn);

        debugfs_remove_recursive(conn->debugfs);

        hci_dev_put(hdev);

        hci_conn_put(conn);
}

static void le_scan_cleanup(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             le_scan_cleanup);
        struct hci_dev *hdev = conn->hdev;
        struct hci_conn *c = NULL;

        BT_DBG("%s hcon %p", hdev->name, conn);

        hci_dev_lock(hdev);

        /* Check that the hci_conn is still around */
        rcu_read_lock();
        list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
                if (c == conn)
                        break;
        }
        rcu_read_unlock();

        if (c == conn) {
                hci_connect_le_scan_cleanup(conn);
                hci_conn_cleanup(conn);
        }

        hci_dev_unlock(hdev);
        hci_dev_put(hdev);
        hci_conn_put(conn);
}

static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
        BT_DBG("%s hcon %p", conn->hdev->name, conn);

        /* We can't call hci_conn_del/hci_conn_cleanup here since that
         * could deadlock with another hci_conn_del() call that's holding
         * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
         * Instead, grab temporary extra references to the hci_dev and
         * hci_conn and perform the necessary cleanup in a separate work
         * callback.
         */

        hci_dev_hold(conn->hdev);
        hci_conn_get(conn);

        /* Even though we hold a reference to the hdev, many other
         * things might get cleaned up meanwhile, including the hdev's
         * own workqueue, so we can't use that for scheduling.
         */
        schedule_work(&conn->le_scan_cleanup);
}

static void hci_acl_create_connection(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("hcon %p", conn);

        /* Many controllers disallow HCI Create Connection while the
         * controller is doing an HCI Inquiry, so cancel the Inquiry first
         * before issuing HCI Create Connection.  This may cause the MGMT
         * discovering state to become false without user space having
         * requested it, but that is okay since the MGMT Discovery APIs do
         * not promise that discovery will run forever.  Instead, user
         * space monitors the MGMT discovering state and may request
         * discovery again when this flag becomes false.
         */
        if (test_bit(HCI_INQUIRY, &hdev->flags)) {
                /* Put this connection to "pending" state so that it will be
                 * executed after the inquiry cancel command complete event.
                 */
                conn->state = BT_CONNECT2;
                hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
                return;
        }

        conn->state = BT_CONNECT;
        conn->out = true;
        conn->role = HCI_ROLE_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode     = ie->data.pscan_mode;
                        cp.clock_offset   = ie->data.clock_offset |
                                            cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
        BT_DBG("hcon %p", conn);

        /* When we are the master of an established connection and it
         * enters the disconnect timeout, go ahead and try to read the
         * current clock offset.  Processing of the result is done
         * within the event handling and hci_clock_offset_evt function.
         */
        if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
            (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
                struct hci_dev *hdev = conn->hdev;
                struct hci_cp_read_clock_offset clkoff_cp;

                clkoff_cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
                             &clkoff_cp);
        }

        return hci_abort_conn(conn, reason);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle   = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

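/* Advance conn->attempt past eSCO parameter entries that rely on 2M (EDR)
 * packet types when the remote side does not support eSCO 2M.  Returns
 * true if a usable entry is left in the table, false once all remaining
 * entries have been skipped.
 */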
static bool find_next_esco_param(struct hci_conn *conn,
                                 const struct sco_param *esco_param, int size)
{
        for (; conn->attempt <= size; conn->attempt++) {
                if (lmp_esco_2m_capable(conn->link) ||
                    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
                        break;
                BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
                       conn, conn->attempt);
        }

        return conn->attempt <= size;
}

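/* Build and send HCI_OP_SETUP_SYNC_CONN for a SCO/eSCO connection.  The
 * transmit parameters are picked from the tables above based on the air
 * mode: mSBC uses esco_param_msbc, CVSD uses esco_param_cvsd when the ACL
 * link supports eSCO and sco_param_cvsd otherwise.  Returns false once the
 * fallback options for this connection attempt are exhausted.
 */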
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;
        const struct sco_param *param;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle   = cpu_to_le16(handle);

        cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
        cp.voice_setting  = cpu_to_le16(conn->setting);

        switch (conn->setting & SCO_AIRMODE_MASK) {
        case SCO_AIRMODE_TRANSP:
                if (!find_next_esco_param(conn, esco_param_msbc,
                                          ARRAY_SIZE(esco_param_msbc)))
                        return false;
                param = &esco_param_msbc[conn->attempt - 1];
                break;
        case SCO_AIRMODE_CVSD:
                if (lmp_esco_capable(conn->link)) {
                        if (!find_next_esco_param(conn, esco_param_cvsd,
                                                  ARRAY_SIZE(esco_param_cvsd)))
                                return false;
                        param = &esco_param_cvsd[conn->attempt - 1];
                } else {
                        if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
                                return false;
                        param = &sco_param_cvsd[conn->attempt - 1];
                }
                break;
        default:
                return false;
        }

        cp.retrans_effort = param->retrans_effort;
        cp.pkt_type = __cpu_to_le16(param->pkt_type);
        cp.max_latency = __cpu_to_le16(param->max_latency);

        if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
                return false;

        return true;
}

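/* Request new LE connection parameters for an established connection.  Any
 * stored hci_conn_params for the destination are updated under hdev->lock
 * before HCI_OP_LE_CONN_UPDATE is sent.  Returns 0x01 if stored parameters
 * were found and updated, 0x00 otherwise.
 */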
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
                      u16 to_multiplier)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_conn_params *params;
        struct hci_cp_le_conn_update cp;

        hci_dev_lock(hdev);

        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
        if (params) {
                params->conn_min_interval = min;
                params->conn_max_interval = max;
                params->conn_latency = latency;
                params->supervision_timeout = to_multiplier;
        }

        hci_dev_unlock(hdev);

        memset(&cp, 0, sizeof(cp));
        cp.handle               = cpu_to_le16(conn->handle);
        cp.conn_interval_min    = cpu_to_le16(min);
        cp.conn_interval_max    = cpu_to_le16(max);
        cp.conn_latency         = cpu_to_le16(latency);
        cp.supervision_timeout  = cpu_to_le16(to_multiplier);
        cp.min_ce_len           = cpu_to_le16(0x0000);
        cp.max_ce_len           = cpu_to_le16(0x0000);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

        if (params)
                return 0x01;

        return 0x00;
}

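/* Start LE link-layer encryption on an established connection by sending
 * HCI_OP_LE_START_ENC with the given EDIV, Rand and LTK.  Only key_size
 * bytes of the key are copied into the command; the remainder stays zeroed.
 */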
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
                      __u8 ltk[16], __u8 key_size)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;

        BT_DBG("hcon %p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.rand = rand;
        cp.ediv = ediv;
        memcpy(cp.ltk, ltk, key_size);

        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        if (!sco)
                return;

        BT_DBG("hcon %p", conn);

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

static void hci_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);
        int refcnt = atomic_read(&conn->refcnt);

        BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

        WARN_ON(refcnt < 0);

        /* FIXME: It was observed that in a failed-pairing scenario the
         * refcnt drops below 0.  This is probably because l2cap_conn_del
         * calls l2cap_chan_del for each channel, and inside l2cap_chan_del
         * conn is dropped.  After that loop hci_chan_del is called, which
         * also drops conn.  For now, make sure the ACL is kept alive if the
         * refcnt is higher than 0, otherwise drop it.
         */
        if (refcnt > 0)
                return;

        /* LE connections in scanning state need special handling */
        if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
            test_bit(HCI_CONN_SCANNING, &conn->flags)) {
                hci_connect_le_scan_remove(conn);
                return;
        }

        hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}

/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             idle_work.work);
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle             = cpu_to_le16(conn->handle);
                cp.max_latency        = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout  = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_sniff_mode cp;
                cp.handle       = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt      = cpu_to_le16(4);
                cp.timeout      = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

static void hci_conn_auto_accept(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             auto_accept_work.work);

        hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                     &conn->dst);
}

static void le_disable_advertising(struct hci_dev *hdev)
{
        if (ext_adv_capable(hdev)) {
                struct hci_cp_le_set_ext_adv_enable cp;

                cp.enable = 0x00;
                cp.num_of_sets = 0x00;

                hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
                             &cp);
        } else {
                u8 enable = 0x00;
                hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
                             &enable);
        }
}

static void le_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             le_conn_timeout.work);
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("");

        /* We could end up here due to having done directed advertising,
         * so clean up the state if necessary. This should however only
         * happen with broken hardware or if low duty cycle was used
         * (which doesn't have a timeout of its own).
         */
        if (conn->role == HCI_ROLE_SLAVE) {
                /* Disable LE Advertising */
                le_disable_advertising(hdev);
                hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
                return;
        }

        hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}

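/* Allocate and initialise a new hci_conn object for a connection of the
 * given type towards dst and add it to the connection hash of hdev.  The
 * object starts out in BT_OPEN state with a refcount of zero and is
 * normally released again through hci_conn_del().
 *
 * Illustrative use only (not taken from this file):
 *
 *      conn = hci_conn_add(hdev, LE_LINK, &dst, HCI_ROLE_MASTER);
 *      if (!conn)
 *              return ERR_PTR(-ENOMEM);
 */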
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
                              u8 role)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %pMR", hdev->name, dst);

        conn = kzalloc(sizeof(*conn), GFP_KERNEL);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        bacpy(&conn->src, &hdev->bdaddr);
        conn->hdev  = hdev;
        conn->type  = type;
        conn->role  = role;
        conn->mode  = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;
        conn->key_type = 0xff;
        conn->rssi = HCI_RSSI_INVALID;
        conn->tx_power = HCI_TX_POWER_INVALID;
        conn->max_tx_power = HCI_TX_POWER_INVALID;

        set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        /* Set Default Authenticated payload timeout to 30s */
        conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

        if (conn->role == HCI_ROLE_MASTER)
                conn->out = true;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case LE_LINK:
                /* conn->src should reflect the local identity address */
                hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                         (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        INIT_LIST_HEAD(&conn->chan_list);

        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
        INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
        INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
        INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        hci_conn_hash_add(hdev, conn);

        /* The SCO and eSCO connections will only be notified when their
         * setup has been completed. This is different to ACL links which
         * can be notified right away.
         */
        if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
                if (hdev->notify)
                        hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
        }

        hci_conn_init_sysfs(conn);

        return conn;
}

int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

        cancel_delayed_work_sync(&conn->disc_work);
        cancel_delayed_work_sync(&conn->auto_accept_work);
        cancel_delayed_work_sync(&conn->idle_work);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                cancel_delayed_work(&conn->le_conn_timeout);

                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_drop(acl);
                }
        }

        if (conn->amp_mgr)
                amp_mgr_put(conn->amp_mgr);

        skb_queue_purge(&conn->data_q);

        /* Remove the connection from the list and cleanup its remaining
         * state. This is a separate function since for some cases like
         * BT_CONNECT_SCAN we *only* want the cleanup part without the
         * rest of hci_conn_del.
         */
        hci_conn_cleanup(conn);

        return 0;
}

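/* Pick the HCI device to use for a connection towards dst.  When src is a
 * real address (not BDADDR_ANY), the adapter whose own or identity address
 * matches src and src_type is chosen; otherwise the first usable adapter
 * whose address differs from dst is used.  The returned device has its
 * reference count raised and must be released with hci_dev_put().
 *
 * Illustrative call (hypothetical values):
 *
 *      hdev = hci_get_route(&dst, BDADDR_ANY, BDADDR_BREDR);
 *      if (!hdev)
 *              return -EHOSTUNREACH;
 */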
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%pMR -> %pMR", src, dst);

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) ||
                    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
                    d->dev_type != HCI_PRIMARY)
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address    - find interface with bdaddr == src
                 */

                if (use_src) {
                        bdaddr_t id_addr;
                        u8 id_addr_type;

                        if (src_type == BDADDR_BREDR) {
                                if (!lmp_bredr_capable(d))
                                        continue;
                                bacpy(&id_addr, &d->bdaddr);
                                id_addr_type = BDADDR_BREDR;
                        } else {
                                if (!lmp_le_capable(d))
                                        continue;

                                hci_copy_identity_address(d, &id_addr,
                                                          &id_addr_type);

                                /* Convert from HCI to three-value type */
                                if (id_addr_type == ADDR_LE_DEV_PUBLIC)
                                        id_addr_type = BDADDR_LE_PUBLIC;
                                else
                                        id_addr_type = BDADDR_LE_RANDOM;
                        }

                        if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* This function requires the caller holds hdev->lock */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_conn_params *params;

        params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
                                           conn->dst_type);
        if (params && params->conn) {
                hci_conn_drop(params->conn);
                hci_conn_put(params->conn);
                params->conn = NULL;
        }

        conn->state = BT_CLOSED;

        /* If the status indicates successful cancellation of
         * the attempt (i.e. Unknown Connection Id) there's no point in
         * notifying failure since we'll go back to keep trying to
         * connect. The only exception is explicit connect requests
         * where a timeout + cancel does indicate an actual failure.
         */
        if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
            (params && params->explicit_connect))
                mgmt_connect_failed(hdev, &conn->dst, conn->type,
                                    conn->dst_type, status);

        hci_connect_cfm(conn, status);

        hci_conn_del(conn);

        /* Since we may have temporarily stopped the background scanning in
         * favor of connection establishment, we should restart it.
         */
        hci_update_background_scan(hdev);

        /* Re-enable advertising in case this was a failed connection
         * attempt as a peripheral.
         */
        hci_req_reenable_advertising(hdev);
}

static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        struct hci_conn *conn;

        hci_dev_lock(hdev);

        conn = hci_lookup_le_connect(hdev);

        if (hdev->adv_instance_cnt)
                hci_req_resume_adv_instances(hdev);

        if (!status) {
                hci_connect_le_scan_cleanup(conn);
                goto done;
        }

        bt_dev_err(hdev, "request failed to create LE connection: "
                         "status 0x%2.2x", status);

        if (!conn)
                goto done;

        hci_le_conn_failed(conn, status);

done:
        hci_dev_unlock(hdev);
}

static bool conn_use_rpa(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void set_ext_conn_params(struct hci_conn *conn,
                                struct hci_cp_le_ext_conn_param *p)
{
        struct hci_dev *hdev = conn->hdev;

        memset(p, 0, sizeof(*p));

        p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
        p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
        p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
        p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
        p->conn_latency = cpu_to_le16(conn->le_conn_latency);
        p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
        p->min_ce_len = cpu_to_le16(0x0000);
        p->max_ce_len = cpu_to_le16(0x0000);
}

Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 838 | static void hci_req_add_le_create_conn(struct hci_request *req, |
Szymon Janc | 082f230 | 2018-04-03 13:40:06 +0200 | [diff] [blame] | 839 | struct hci_conn *conn, |
| 840 | bdaddr_t *direct_rpa) |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 841 | { |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 842 | struct hci_dev *hdev = conn->hdev; |
| 843 | u8 own_addr_type; |
| 844 | |
Szymon Janc | 082f230 | 2018-04-03 13:40:06 +0200 | [diff] [blame] | 845 | /* If direct address was provided we use it instead of current |
| 846 | * address. |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 847 | */ |
Szymon Janc | 082f230 | 2018-04-03 13:40:06 +0200 | [diff] [blame] | 848 | if (direct_rpa) { |
| 849 | if (bacmp(&req->hdev->random_addr, direct_rpa)) |
| 850 | hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, |
| 851 | direct_rpa); |
| 852 | |
| 853 | /* direct address is always RPA */ |
| 854 | own_addr_type = ADDR_LE_DEV_RANDOM; |
| 855 | } else { |
| 856 | /* Update random address, but set require_privacy to false so |
| 857 | * that we never connect with an non-resolvable address. |
| 858 | */ |
| 859 | if (hci_update_random_address(req, false, conn_use_rpa(conn), |
| 860 | &own_addr_type)) |
| 861 | return; |
| 862 | } |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 863 | |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 864 | if (use_ext_conn(hdev)) { |
| 865 | struct hci_cp_le_ext_create_conn *cp; |
| 866 | struct hci_cp_le_ext_conn_param *p; |
Jaganath Kanakkassery | 4e6e99e | 2018-07-19 17:09:39 +0530 | [diff] [blame] | 867 | u8 data[sizeof(*cp) + sizeof(*p) * 3]; |
| 868 | u32 plen; |
Johan Hedberg | eec7a01 | 2016-03-09 17:30:32 +0200 | [diff] [blame] | 869 | |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 870 | cp = (void *) data; |
| 871 | p = (void *) cp->data; |
Johan Hedberg | 2f99536 | 2015-12-03 12:45:19 +0200 | [diff] [blame] | 872 | |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 873 | memset(cp, 0, sizeof(*cp)); |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 874 | |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 875 | bacpy(&cp->peer_addr, &conn->dst); |
| 876 | cp->peer_addr_type = conn->dst_type; |
| 877 | cp->own_addr_type = own_addr_type; |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 878 | |
Jaganath Kanakkassery | 4e6e99e | 2018-07-19 17:09:39 +0530 | [diff] [blame] | 879 | plen = sizeof(*cp); |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 880 | |
Jaganath Kanakkassery | 4e6e99e | 2018-07-19 17:09:39 +0530 | [diff] [blame] | 881 | if (scan_1m(hdev)) { |
| 882 | cp->phys |= LE_SCAN_PHY_1M; |
| 883 | set_ext_conn_params(conn, p); |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 884 | |
Jaganath Kanakkassery | 4e6e99e | 2018-07-19 17:09:39 +0530 | [diff] [blame] | 885 | p++; |
| 886 | plen += sizeof(*p); |
| 887 | } |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 888 | |
Jaganath Kanakkassery | 4e6e99e | 2018-07-19 17:09:39 +0530 | [diff] [blame] | 889 | if (scan_2m(hdev)) { |
| 890 | cp->phys |= LE_SCAN_PHY_2M; |
| 891 | set_ext_conn_params(conn, p); |
| 892 | |
| 893 | p++; |
| 894 | plen += sizeof(*p); |
| 895 | } |
| 896 | |
| 897 | if (scan_coded(hdev)) { |
| 898 | cp->phys |= LE_SCAN_PHY_CODED; |
| 899 | set_ext_conn_params(conn, p); |
| 900 | |
| 901 | plen += sizeof(*p); |
| 902 | } |
| 903 | |
| 904 | hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data); |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 905 | |
| 906 | } else { |
| 907 | struct hci_cp_le_create_conn cp; |
| 908 | |
| 909 | memset(&cp, 0, sizeof(cp)); |
| 910 | |
Alain Michaud | 10873f9 | 2020-06-11 02:01:56 +0000 | [diff] [blame] | 911 | cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); |
| 912 | cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); |
Jaganath Kanakkassery | 4d94f95 | 2018-07-06 22:50:32 +0200 | [diff] [blame] | 913 | |
| 914 | bacpy(&cp.peer_addr, &conn->dst); |
| 915 | cp.peer_addr_type = conn->dst_type; |
| 916 | cp.own_address_type = own_addr_type; |
| 917 | cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); |
| 918 | cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); |
| 919 | cp.conn_latency = cpu_to_le16(conn->le_conn_latency); |
| 920 | cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); |
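| | /* A min/max CE length of 0x0000 places no specific expectation on |
| | * the length of the connection events. |
| | */ |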
| 921 | cp.min_ce_len = cpu_to_le16(0x0000); |
| 922 | cp.max_ce_len = cpu_to_le16(0x0000); |
| 923 | |
| 924 | hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); |
| 925 | } |
Johan Hedberg | b46e003 | 2014-02-28 12:54:15 +0200 | [diff] [blame] | 926 | |
| 927 | conn->state = BT_CONNECT; |
Jakub Pawlowski | 28a667c | 2015-08-07 20:22:54 +0200 | [diff] [blame] | 928 | clear_bit(HCI_CONN_SCANNING, &conn->flags); |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 929 | } |
| 930 | |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 931 | static void hci_req_directed_advertising(struct hci_request *req, |
| 932 | struct hci_conn *conn) |
| 933 | { |
| 934 | struct hci_dev *hdev = req->hdev; |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 935 | u8 own_addr_type; |
| 936 | u8 enable; |
| 937 | |
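| | /* Prefer the extended advertising commands when the controller |
| | * supports them; otherwise fall back to legacy directed advertising. |
| | */ |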
Jaganath Kanakkassery | 075e40b | 2018-07-19 17:09:44 +0530 | [diff] [blame] | 938 | if (ext_adv_capable(hdev)) { |
| 939 | struct hci_cp_le_set_ext_adv_params cp; |
Jaganath Kanakkassery | a73c046 | 2018-07-19 17:09:45 +0530 | [diff] [blame] | 940 | bdaddr_t random_addr; |
| 941 | |
| 942 | /* Set require_privacy to false so that the remote device has a |
| 943 | * chance of identifying us. |
| 944 | */ |
| 945 | if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, |
| 946 | &own_addr_type, &random_addr) < 0) |
| 947 | return; |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 948 | |
Jaganath Kanakkassery | 075e40b | 2018-07-19 17:09:44 +0530 | [diff] [blame] | 949 | memset(&cp, 0, sizeof(cp)); |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 950 | |
Jaganath Kanakkassery | 075e40b | 2018-07-19 17:09:44 +0530 | [diff] [blame] | 951 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); |
| 952 | cp.own_addr_type = own_addr_type; |
| 953 | cp.channel_map = hdev->le_adv_channel_map; |
| 954 | cp.tx_power = HCI_TX_POWER_INVALID; |
| 955 | cp.primary_phy = HCI_ADV_PHY_1M; |
| 956 | cp.secondary_phy = HCI_ADV_PHY_1M; |
| 957 | cp.handle = 0; /* Use instance 0 for directed adv */ |
| 958 | cp.own_addr_type = own_addr_type; |
| 959 | cp.peer_addr_type = conn->dst_type; |
| 960 | bacpy(&cp.peer_addr, &conn->dst); |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 961 | |
Sathish Narsimman | a9e4569 | 2020-02-24 11:02:24 +0530 | [diff] [blame] | 962 | /* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the |
| 963 | * advertising_event_property LE_LEGACY_ADV_DIRECT_IND |
| 964 | * does not support advertising data. If the advertising set |
| 965 | * already contains some, the controller shall return the error |
| 966 | * code 'Invalid HCI Command Parameters' (0x12). |
| 967 | * So the adv set for handle 0x00 has to be removed first, since |
| 968 | * we use instance 0 for directed adv. |
| 969 | */ |
Daniel Winkler | 37adf70 | 2020-07-14 14:16:00 -0700 | [diff] [blame] | 970 | __hci_req_remove_ext_adv_instance(req, cp.handle); |
Sathish Narsimman | a9e4569 | 2020-02-24 11:02:24 +0530 | [diff] [blame] | 971 | |
Jaganath Kanakkassery | 075e40b | 2018-07-19 17:09:44 +0530 | [diff] [blame] | 972 | hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 973 | |
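| | /* Program the random address for advertising instance 0 only when |
| | * we advertise with a random own address that the controller does |
| | * not already have configured. |
| | */ |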
Jaganath Kanakkassery | a73c046 | 2018-07-19 17:09:45 +0530 | [diff] [blame] | 974 | if (own_addr_type == ADDR_LE_DEV_RANDOM && |
| 975 | bacmp(&random_addr, BDADDR_ANY) && |
| 976 | bacmp(&random_addr, &hdev->random_addr)) { |
| 977 | struct hci_cp_le_set_adv_set_rand_addr cp; |
| 978 | |
| 979 | memset(&cp, 0, sizeof(cp)); |
| 980 | |
| 981 | cp.handle = 0; |
| 982 | bacpy(&cp.bdaddr, &random_addr); |
| 983 | |
| 984 | hci_req_add(req, |
| 985 | HCI_OP_LE_SET_ADV_SET_RAND_ADDR, |
| 986 | sizeof(cp), &cp); |
| 987 | } |
| 988 | |
Luiz Augusto von Dentz | 1d0fac2 | 2019-06-03 13:48:42 +0300 | [diff] [blame] | 989 | __hci_req_enable_ext_advertising(req, 0x00); |
Jaganath Kanakkassery | 075e40b | 2018-07-19 17:09:44 +0530 | [diff] [blame] | 990 | } else { |
| 991 | struct hci_cp_le_set_adv_param cp; |
| 992 | |
| 993 | /* Clear the HCI_LE_ADV bit temporarily so that the |
| 994 | * hci_update_random_address knows that it's safe to go ahead |
| 995 | * and write a new random address. The flag will be set back on |
| 996 | * as soon as the SET_ADV_ENABLE HCI command completes. |
| 997 | */ |
| 998 | hci_dev_clear_flag(hdev, HCI_LE_ADV); |
| 999 | |
| 1000 | /* Set require_privacy to false so that the remote device has a |
| 1001 | * chance of identifying us. |
| 1002 | */ |
| 1003 | if (hci_update_random_address(req, false, conn_use_rpa(conn), |
| 1004 | &own_addr_type) < 0) |
| 1005 | return; |
| 1006 | |
| 1007 | memset(&cp, 0, sizeof(cp)); |
Szymon Janc | 4c371bb | 2019-10-02 14:22:43 +0200 | [diff] [blame] | 1008 | |
| 1009 | /* Some controllers might reject the command if intervals are not |
| 1010 | * within range for undirected advertising. |
| 1011 | * BCM20702A0 is known to be affected by this. |
| 1012 | */ |
| 1013 | cp.min_interval = cpu_to_le16(0x0020); |
| 1014 | cp.max_interval = cpu_to_le16(0x0020); |
| 1015 | |
Jaganath Kanakkassery | 075e40b | 2018-07-19 17:09:44 +0530 | [diff] [blame] | 1016 | cp.type = LE_ADV_DIRECT_IND; |
| 1017 | cp.own_address_type = own_addr_type; |
| 1018 | cp.direct_addr_type = conn->dst_type; |
| 1019 | bacpy(&cp.direct_addr, &conn->dst); |
| 1020 | cp.channel_map = hdev->le_adv_channel_map; |
| 1021 | |
| 1022 | hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); |
| 1023 | |
| 1024 | enable = 0x01; |
| 1025 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), |
| 1026 | &enable); |
| 1027 | } |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 1028 | |
| 1029 | conn->state = BT_CONNECT; |
| 1030 | } |
| 1031 | |
Andre Guedes | 04a6c58 | 2014-02-26 20:21:44 -0300 | [diff] [blame] | 1032 | struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, |
Johan Hedberg | cdd6275 | 2014-07-07 15:02:28 +0300 | [diff] [blame] | 1033 | u8 dst_type, u8 sec_level, u16 conn_timeout, |
Szymon Janc | 082f230 | 2018-04-03 13:40:06 +0200 | [diff] [blame] | 1034 | u8 role, bdaddr_t *direct_rpa) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | { |
Andre Guedes | 4292f1f | 2014-02-03 13:56:19 -0300 | [diff] [blame] | 1036 | struct hci_conn_params *params; |
Johan Hedberg | e2caced | 2015-11-11 14:44:59 +0200 | [diff] [blame] | 1037 | struct hci_conn *conn; |
Johan Hedberg | 1ebfcc1 | 2014-02-18 21:41:36 +0200 | [diff] [blame] | 1038 | struct smp_irk *irk; |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 1039 | struct hci_request req; |
Andre Guedes | 1d399ae | 2013-10-08 08:21:17 -0300 | [diff] [blame] | 1040 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1041 | |
Sathish Narasimman | 5c49bcc | 2020-07-23 18:09:01 +0530 | [diff] [blame] | 1042 | /* This ensures that address resolution will not be disabled |
| 1043 | * when le_scan is stopped right before an le_create_conn. |
| 1044 | */ |
| 1045 | bool rpa_le_conn = true; |
| 1046 | |
Lukasz Rymanowski | 152d386 | 2015-02-11 12:31:40 +0100 | [diff] [blame] | 1047 | /* Let's make sure that LE is enabled. */ |
Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1048 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { |
Lukasz Rymanowski | 152d386 | 2015-02-11 12:31:40 +0100 | [diff] [blame] | 1049 | if (lmp_le_capable(hdev)) |
| 1050 | return ERR_PTR(-ECONNREFUSED); |
| 1051 | |
| 1052 | return ERR_PTR(-EOPNOTSUPP); |
| 1053 | } |
| 1054 | |
Johan Hedberg | 658aead | 2015-11-11 14:44:58 +0200 | [diff] [blame] | 1055 | /* Since the controller supports only one LE connection attempt at a |
| 1056 | * time, we return -EBUSY if there is any connection attempt running. |
| 1057 | */ |
| 1058 | if (hci_lookup_le_connect(hdev)) |
| 1059 | return ERR_PTR(-EBUSY); |
| 1060 | |
Johan Hedberg | e2caced | 2015-11-11 14:44:59 +0200 | [diff] [blame] | 1061 | /* If there's already a connection object but it's not in |
| 1062 | * scanning state it means it must already be established, in |
| 1063 | * which case we can't do anything else except report a failure |
| 1064 | * to connect. |
Andre Guedes | 620ad52 | 2013-10-08 08:21:18 -0300 | [diff] [blame] | 1065 | */ |
Johan Hedberg | 9d4c1cc | 2015-10-21 18:03:01 +0300 | [diff] [blame] | 1066 | conn = hci_conn_hash_lookup_le(hdev, dst, dst_type); |
Johan Hedberg | e2caced | 2015-11-11 14:44:59 +0200 | [diff] [blame] | 1067 | if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) { |
| 1068 | return ERR_PTR(-EBUSY); |
Ville Tervo | fcd89c0 | 2011-02-10 22:38:47 -0300 | [diff] [blame] | 1069 | } |
| 1070 | |
Marcel Holtmann | edb4b46 | 2014-02-18 15:13:43 -0800 | [diff] [blame] | 1071 | /* When given an identity address with existing identity |
| 1072 | * resolving key, the connection needs to be established |
| 1073 | * to a resolvable random address. |
| 1074 | * |
Marcel Holtmann | edb4b46 | 2014-02-18 15:13:43 -0800 | [diff] [blame] | 1075 | * Storing the resolvable random address is required here |
| 1076 | * to handle connection failures. The address will later |
| 1077 | * be resolved back into the original identity address |
| 1078 | * from the connect request. |
| 1079 | */ |
Johan Hedberg | 1ebfcc1 | 2014-02-18 21:41:36 +0200 | [diff] [blame] | 1080 | irk = hci_find_irk_by_addr(hdev, dst, dst_type); |
| 1081 | if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { |
| 1082 | dst = &irk->rpa; |
| 1083 | dst_type = ADDR_LE_DEV_RANDOM; |
| 1084 | } |
| 1085 | |
Johan Hedberg | e2caced | 2015-11-11 14:44:59 +0200 | [diff] [blame] | 1086 | if (conn) { |
Jakub Pawlowski | 28a667c | 2015-08-07 20:22:54 +0200 | [diff] [blame] | 1087 | bacpy(&conn->dst, dst); |
| 1088 | } else { |
| 1089 | conn = hci_conn_add(hdev, LE_LINK, dst, role); |
Johan Hedberg | e2caced | 2015-11-11 14:44:59 +0200 | [diff] [blame] | 1090 | if (!conn) |
| 1091 | return ERR_PTR(-ENOMEM); |
| 1092 | hci_conn_hold(conn); |
| 1093 | conn->pending_sec_level = sec_level; |
Jakub Pawlowski | 28a667c | 2015-08-07 20:22:54 +0200 | [diff] [blame] | 1094 | } |
| 1095 | |
Johan Hedberg | 1ebfcc1 | 2014-02-18 21:41:36 +0200 | [diff] [blame] | 1096 | conn->dst_type = dst_type; |
Andre Guedes | 620ad52 | 2013-10-08 08:21:18 -0300 | [diff] [blame] | 1097 | conn->sec_level = BT_SECURITY_LOW; |
Johan Hedberg | 09ae260 | 2014-07-06 13:41:15 +0300 | [diff] [blame] | 1098 | conn->conn_timeout = conn_timeout; |
Andre Guedes | 4292f1f | 2014-02-03 13:56:19 -0300 | [diff] [blame] | 1099 | |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 1100 | hci_req_init(&req, hdev); |
| 1101 | |
Johan Hedberg | 376f54c | 2014-07-08 15:07:52 +0300 | [diff] [blame] | 1102 | /* Disable advertising if we're active. For master role |
| 1103 | * connections most controllers will refuse to connect if |
| 1104 | * advertising is enabled, and for slave role connections we |
| 1105 | * have to disable it anyway in order to start directed |
Daniel Winkler | 2943d8e | 2020-11-06 15:20:19 -0800 | [diff] [blame] | 1106 | * advertising. Any registered advertisements will be |
| 1107 | * re-enabled after the connection attempt is finished. |
Johan Hedberg | 376f54c | 2014-07-08 15:07:52 +0300 | [diff] [blame] | 1108 | */ |
Sathish Narsimman | 05bd80a | 2020-02-17 14:37:44 +0530 | [diff] [blame] | 1109 | if (hci_dev_test_flag(hdev, HCI_LE_ADV)) |
Daniel Winkler | 2943d8e | 2020-11-06 15:20:19 -0800 | [diff] [blame] | 1110 | __hci_req_pause_adv_instances(&req); |
Johan Hedberg | 376f54c | 2014-07-08 15:07:52 +0300 | [diff] [blame] | 1111 | |
Johan Hedberg | cdd6275 | 2014-07-07 15:02:28 +0300 | [diff] [blame] | 1112 | /* If requested to connect as slave, use directed advertising */ |
Johan Hedberg | e804d25 | 2014-07-16 11:42:28 +0300 | [diff] [blame] | 1113 | if (conn->role == HCI_ROLE_SLAVE) { |
Johan Hedberg | e8bb6b9 | 2014-07-08 15:07:53 +0300 | [diff] [blame] | 1114 | /* If we're active scanning most controllers are unable |
| 1115 | * to initiate advertising. Simply reject the attempt. |
| 1116 | */ |
Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1117 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && |
Johan Hedberg | e8bb6b9 | 2014-07-08 15:07:53 +0300 | [diff] [blame] | 1118 | hdev->le_scan_type == LE_SCAN_ACTIVE) { |
Jaganath Kanakkassery | f17d858 | 2017-10-25 10:58:48 +0530 | [diff] [blame] | 1119 | hci_req_purge(&req); |
Johan Hedberg | e8bb6b9 | 2014-07-08 15:07:53 +0300 | [diff] [blame] | 1120 | hci_conn_del(conn); |
| 1121 | return ERR_PTR(-EBUSY); |
| 1122 | } |
| 1123 | |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 1124 | hci_req_directed_advertising(&req, conn); |
| 1125 | goto create_conn; |
| 1126 | } |
| 1127 | |
Andre Guedes | 4292f1f | 2014-02-03 13:56:19 -0300 | [diff] [blame] | 1128 | params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); |
| 1129 | if (params) { |
| 1130 | conn->le_conn_min_interval = params->conn_min_interval; |
| 1131 | conn->le_conn_max_interval = params->conn_max_interval; |
Marcel Holtmann | 037fc41 | 2014-06-29 16:43:27 +0200 | [diff] [blame] | 1132 | conn->le_conn_latency = params->conn_latency; |
| 1133 | conn->le_supv_timeout = params->supervision_timeout; |
Andre Guedes | 4292f1f | 2014-02-03 13:56:19 -0300 | [diff] [blame] | 1134 | } else { |
| 1135 | conn->le_conn_min_interval = hdev->le_conn_min_interval; |
| 1136 | conn->le_conn_max_interval = hdev->le_conn_max_interval; |
Marcel Holtmann | 04fb7d9 | 2014-06-30 12:34:36 +0200 | [diff] [blame] | 1137 | conn->le_conn_latency = hdev->le_conn_latency; |
| 1138 | conn->le_supv_timeout = hdev->le_supv_timeout; |
Andre Guedes | 4292f1f | 2014-02-03 13:56:19 -0300 | [diff] [blame] | 1139 | } |
Vinicius Costa Gomes | d04aef4 | 2012-07-27 19:32:56 -0300 | [diff] [blame] | 1140 | |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 1141 | /* If controller is scanning, we stop it since some controllers are |
Johan Hedberg | 81ad6fd | 2014-02-28 20:26:13 +0200 | [diff] [blame] | 1142 | * not able to scan and connect at the same time. Also set the |
| 1143 | * HCI_LE_SCAN_INTERRUPTED flag so that the command complete |
| 1144 | * handler for scan disabling knows to set the correct discovery |
| 1145 | * state. |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 1146 | */ |
Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1147 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { |
Sathish Narasimman | 5c49bcc | 2020-07-23 18:09:01 +0530 | [diff] [blame] | 1148 | hci_req_add_le_scan_disable(&req, rpa_le_conn); |
Marcel Holtmann | a1536da | 2015-03-13 02:11:01 -0700 | [diff] [blame] | 1149 | hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 1150 | } |
| 1151 | |
Szymon Janc | 082f230 | 2018-04-03 13:40:06 +0200 | [diff] [blame] | 1152 | hci_req_add_le_create_conn(&req, conn, direct_rpa); |
Johan Hedberg | 81ad6fd | 2014-02-28 20:26:13 +0200 | [diff] [blame] | 1153 | |
Johan Hedberg | 3c85775 | 2014-03-25 10:30:49 +0200 | [diff] [blame] | 1154 | create_conn: |
Johan Hedberg | 81ad6fd | 2014-02-28 20:26:13 +0200 | [diff] [blame] | 1155 | err = hci_req_run(&req, create_le_conn_complete); |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 1156 | if (err) { |
| 1157 | hci_conn_del(conn); |
Daniel Winkler | 2943d8e | 2020-11-06 15:20:19 -0800 | [diff] [blame] | 1158 | |
| 1159 | if (hdev->adv_instance_cnt) |
| 1160 | hci_req_resume_adv_instances(hdev); |
| 1161 | |
Andre Guedes | 620ad52 | 2013-10-08 08:21:18 -0300 | [diff] [blame] | 1162 | return ERR_PTR(err); |
Andre Guedes | 2acf3d9 | 2014-02-26 20:21:42 -0300 | [diff] [blame] | 1163 | } |
Vinicius Costa Gomes | d04aef4 | 2012-07-27 19:32:56 -0300 | [diff] [blame] | 1164 | |
Andre Guedes | f1e5d54 | 2013-10-03 18:25:44 -0300 | [diff] [blame] | 1165 | return conn; |
Vinicius Costa Gomes | d04aef4 | 2012-07-27 19:32:56 -0300 | [diff] [blame] | 1166 | } |
| 1167 | |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1168 | static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) |
| 1169 | { |
| 1170 | struct hci_conn *conn; |
| 1171 | |
Johan Hedberg | 9d4c1cc | 2015-10-21 18:03:01 +0300 | [diff] [blame] | 1172 | conn = hci_conn_hash_lookup_le(hdev, addr, type); |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1173 | if (!conn) |
| 1174 | return false; |
| 1175 | |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1176 | if (conn->state != BT_CONNECTED) |
| 1177 | return false; |
| 1178 | |
| 1179 | return true; |
| 1180 | } |
| 1181 | |
| 1182 | /* This function requires the caller holds hdev->lock */ |
Johan Hedberg | 84235d2 | 2015-11-11 08:11:20 +0200 | [diff] [blame] | 1183 | static int hci_explicit_conn_params_set(struct hci_dev *hdev, |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1184 | bdaddr_t *addr, u8 addr_type) |
| 1185 | { |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1186 | struct hci_conn_params *params; |
| 1187 | |
| 1188 | if (is_connected(hdev, addr, addr_type)) |
| 1189 | return -EISCONN; |
| 1190 | |
Jakub Pawlowski | 5157b8a | 2015-10-16 10:07:54 +0300 | [diff] [blame] | 1191 | params = hci_conn_params_lookup(hdev, addr, addr_type); |
| 1192 | if (!params) { |
| 1193 | params = hci_conn_params_add(hdev, addr, addr_type); |
| 1194 | if (!params) |
| 1195 | return -ENOMEM; |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1196 | |
Jakub Pawlowski | 5157b8a | 2015-10-16 10:07:54 +0300 | [diff] [blame] | 1197 | /* If we created new params, mark them to be deleted in |
| 1198 | * hci_connect_le_scan_cleanup. This is a different case from |
| 1199 | * existing disabled params, which will stay after cleanup. |
| 1200 | */ |
| 1201 | params->auto_connect = HCI_AUTO_CONN_EXPLICIT; |
| 1202 | } |
| 1203 | |
| 1204 | /* We're trying to connect, so make sure the params are on pend_le_conns */ |
Johan Hedberg | 49c5092 | 2015-10-16 10:07:51 +0300 | [diff] [blame] | 1205 | if (params->auto_connect == HCI_AUTO_CONN_DISABLED || |
Jakub Pawlowski | 5157b8a | 2015-10-16 10:07:54 +0300 | [diff] [blame] | 1206 | params->auto_connect == HCI_AUTO_CONN_REPORT || |
| 1207 | params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1208 | list_del_init(¶ms->action); |
| 1209 | list_add(¶ms->action, &hdev->pend_le_conns); |
| 1210 | } |
| 1211 | |
| 1212 | params->explicit_connect = true; |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1213 | |
| 1214 | BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type, |
| 1215 | params->auto_connect); |
| 1216 | |
| 1217 | return 0; |
| 1218 | } |
| 1219 | |
| 1220 | /* This function requires the caller holds hdev->lock */ |
| 1221 | struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, |
| 1222 | u8 dst_type, u8 sec_level, |
Manish Mandlik | 76b1399 | 2020-06-17 16:39:19 +0200 | [diff] [blame] | 1223 | u16 conn_timeout, |
| 1224 | enum conn_reasons conn_reason) |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1225 | { |
| 1226 | struct hci_conn *conn; |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1227 | |
| 1228 | /* Let's make sure that LE is enabled. */ |
| 1229 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { |
| 1230 | if (lmp_le_capable(hdev)) |
| 1231 | return ERR_PTR(-ECONNREFUSED); |
| 1232 | |
| 1233 | return ERR_PTR(-EOPNOTSUPP); |
| 1234 | } |
| 1235 | |
| 1236 | /* Some devices send ATT messages as soon as the physical link is |
| 1237 | * established. To be able to handle these ATT messages, the user- |
| 1238 | * space first establishes the connection and then starts the pairing |
| 1239 | * process. |
| 1240 | * |
| 1241 | * So if a hci_conn object already exists for the following connection |
| 1242 | * attempt, we simply update pending_sec_level and auth_type fields |
| 1243 | * and return the object found. |
| 1244 | */ |
Johan Hedberg | 9d4c1cc | 2015-10-21 18:03:01 +0300 | [diff] [blame] | 1245 | conn = hci_conn_hash_lookup_le(hdev, dst, dst_type); |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1246 | if (conn) { |
| 1247 | if (conn->pending_sec_level < sec_level) |
| 1248 | conn->pending_sec_level = sec_level; |
| 1249 | goto done; |
| 1250 | } |
| 1251 | |
| 1252 | BT_DBG("requesting refresh of dst_addr"); |
| 1253 | |
Johan Hedberg | 0ad06aa | 2015-11-11 14:44:57 +0200 | [diff] [blame] | 1254 | conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER); |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1255 | if (!conn) |
| 1256 | return ERR_PTR(-ENOMEM); |
| 1257 | |
Navid Emamdoost | d088337 | 2019-11-21 14:20:36 -0600 | [diff] [blame] | 1258 | if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { |
| 1259 | hci_conn_del(conn); |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1260 | return ERR_PTR(-EBUSY); |
Navid Emamdoost | d088337 | 2019-11-21 14:20:36 -0600 | [diff] [blame] | 1261 | } |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1262 | |
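| | /* No connect request is issued here; the device is put on the |
| | * pend_le_conns list above and the LE connection is created once |
| | * background scanning sees it advertising. |
| | */ |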
| 1263 | conn->state = BT_CONNECT; |
| 1264 | set_bit(HCI_CONN_SCANNING, &conn->flags); |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1265 | conn->dst_type = dst_type; |
| 1266 | conn->sec_level = BT_SECURITY_LOW; |
| 1267 | conn->pending_sec_level = sec_level; |
| 1268 | conn->conn_timeout = conn_timeout; |
Manish Mandlik | 76b1399 | 2020-06-17 16:39:19 +0200 | [diff] [blame] | 1269 | conn->conn_reason = conn_reason; |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1270 | |
Johan Hedberg | 84235d2 | 2015-11-11 08:11:20 +0200 | [diff] [blame] | 1271 | hci_update_background_scan(hdev); |
| 1272 | |
Jakub Pawlowski | f75113a | 2015-08-07 20:22:53 +0200 | [diff] [blame] | 1273 | done: |
| 1274 | hci_conn_hold(conn); |
| 1275 | return conn; |
| 1276 | } |
| 1277 | |
Andre Guedes | 04a6c58 | 2014-02-26 20:21:44 -0300 | [diff] [blame] | 1278 | struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, |
Manish Mandlik | 76b1399 | 2020-06-17 16:39:19 +0200 | [diff] [blame] | 1279 | u8 sec_level, u8 auth_type, |
| 1280 | enum conn_reasons conn_reason) |
Marcel Holtmann | 5b7f990 | 2007-07-11 09:51:55 +0200 | [diff] [blame] | 1281 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 | struct hci_conn *acl; |
Marcel Holtmann | e73439d | 2010-07-26 10:06:00 -0400 | [diff] [blame] | 1283 | |
Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1284 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { |
Lukasz Rymanowski | c411110 | 2015-02-11 12:31:41 +0100 | [diff] [blame] | 1285 | if (lmp_bredr_capable(hdev)) |
| 1286 | return ERR_PTR(-ECONNREFUSED); |
| 1287 | |
Johan Hedberg | beb19e4 | 2014-07-18 11:15:26 +0300 | [diff] [blame] | 1288 | return ERR_PTR(-EOPNOTSUPP); |
Lukasz Rymanowski | c411110 | 2015-02-11 12:31:41 +0100 | [diff] [blame] | 1289 | } |
Johan Hedberg | 56f8790 | 2013-10-02 13:43:13 +0300 | [diff] [blame] | 1290 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1291 | acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); |
| 1292 | if (!acl) { |
Johan Hedberg | a5c4e30 | 2014-07-16 11:56:07 +0300 | [diff] [blame] | 1293 | acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | if (!acl) |
Johan Hedberg | 48c7aba | 2012-02-19 14:06:48 +0200 | [diff] [blame] | 1295 | return ERR_PTR(-ENOMEM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | } |
| 1297 | |
| 1298 | hci_conn_hold(acl); |
| 1299 | |
Manish Mandlik | 76b1399 | 2020-06-17 16:39:19 +0200 | [diff] [blame] | 1300 | acl->conn_reason = conn_reason; |
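| | /* Only kick off a new connection attempt if the ACL link is not |
| | * already connecting or connected. |
| | */ |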
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { |
| 1302 | acl->sec_level = BT_SECURITY_LOW; |
Marcel Holtmann | 5b7f990 | 2007-07-11 09:51:55 +0200 | [diff] [blame] | 1303 | acl->pending_sec_level = sec_level; |
| 1304 | acl->auth_type = auth_type; |
Vinicius Costa Gomes | 1aef866 | 2012-07-27 19:32:55 -0300 | [diff] [blame] | 1305 | hci_acl_create_connection(acl); |
Nick Pelly | c390216 | 2009-11-13 14:16:32 -0800 | [diff] [blame] | 1306 | } |
| 1307 | |
Vinicius Costa Gomes | db47427 | 2012-07-28 22:35:59 -0300 | [diff] [blame] | 1308 | return acl; |
| 1309 | } |
| 1310 | |
Frédéric Dalleau | 10c62dd | 2013-08-19 14:23:59 +0200 | [diff] [blame] | 1311 | struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, |
| 1312 | __u16 setting) |
Vinicius Costa Gomes | db47427 | 2012-07-28 22:35:59 -0300 | [diff] [blame] | 1313 | { |
| 1314 | struct hci_conn *acl; |
| 1315 | struct hci_conn *sco; |
| 1316 | |
Manish Mandlik | 76b1399 | 2020-06-17 16:39:19 +0200 | [diff] [blame] | 1317 | acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, |
| 1318 | CONN_REASON_SCO_CONNECT); |
Vinicius Costa Gomes | db47427 | 2012-07-28 22:35:59 -0300 | [diff] [blame] | 1319 | if (IS_ERR(acl)) |
Marcel Holtmann | b6a0dc8 | 2007-10-20 14:55:10 +0200 | [diff] [blame] | 1320 | return acl; |
| 1321 | |
| 1322 | sco = hci_conn_hash_lookup_ba(hdev, type, dst); |
| 1323 | if (!sco) { |
Johan Hedberg | a5c4e30 | 2014-07-16 11:56:07 +0300 | [diff] [blame] | 1324 | sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER); |
Marcel Holtmann | 5b7f990 | 2007-07-11 09:51:55 +0200 | [diff] [blame] | 1325 | if (!sco) { |
David Herrmann | 76a68ba | 2013-04-06 20:28:37 +0200 | [diff] [blame] | 1326 | hci_conn_drop(acl); |
Johan Hedberg | 48c7aba | 2012-02-19 14:06:48 +0200 | [diff] [blame] | 1327 | return ERR_PTR(-ENOMEM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | } |
| 1329 | } |
Marcel Holtmann | e7c29cb | 2008-09-09 07:19:20 +0200 | [diff] [blame] | 1330 | |
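| | /* Cross-link the SCO and ACL connection objects so that each can |
| | * find its sibling later on. |
| | */ |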
| 1331 | acl->link = sco; |
| 1332 | sco->link = acl; |
| 1333 | |
| 1334 | hci_conn_hold(sco); |
| 1335 | |
Frédéric Dalleau | 10c62dd | 2013-08-19 14:23:59 +0200 | [diff] [blame] | 1336 | sco->setting = setting; |
| 1337 | |
Marcel Holtmann | e7c29cb | 2008-09-09 07:19:20 +0200 | [diff] [blame] | 1338 | if (acl->state == BT_CONNECTED && |
Gustavo Padovan | 5974e4c | 2012-05-17 00:36:25 -0300 | [diff] [blame] | 1339 | (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { |
Johan Hedberg | 58a681e | 2012-01-16 06:47:28 +0200 | [diff] [blame] | 1340 | set_bit(HCI_CONN_POWER_SAVE, &acl->flags); |
Jaikumar Ganesh | 14b12d0 | 2011-05-23 18:06:04 -0700 | [diff] [blame] | 1341 | hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); |
Marcel Holtmann | e7c29cb | 2008-09-09 07:19:20 +0200 | [diff] [blame] | 1342 | |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1343 | if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) { |
Marcel Holtmann | e7c29cb | 2008-09-09 07:19:20 +0200 | [diff] [blame] | 1344 | /* defer SCO setup until mode change completed */ |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1345 | set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags); |
Marcel Holtmann | 0684e5f | 2009-02-09 02:48:38 +0100 | [diff] [blame] | 1346 | return sco; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | } |
| 1348 | |
| 1349 | hci_sco_setup(acl, 0x00); |
Marcel Holtmann | 96a3183 | 2009-02-12 16:23:03 +0100 | [diff] [blame] | 1350 | } |
Marcel Holtmann | 0684e5f | 2009-02-09 02:48:38 +0100 | [diff] [blame] | 1351 | |
Marcel Holtmann | 96a3183 | 2009-02-12 16:23:03 +0100 | [diff] [blame] | 1352 | return sco; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | |
| 1355 | /* Check link security requirement */ |
| 1356 | int hci_conn_check_link_mode(struct hci_conn *conn) |
| 1357 | { |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1358 | BT_DBG("hcon %p", conn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | |
Marcel Holtmann | 40b552a | 2014-03-19 14:10:25 -0700 | [diff] [blame] | 1360 | /* In Secure Connections Only mode, it is required that Secure |
| 1361 | * Connections is used and the link is encrypted with AES-CCM |
| 1362 | * using a P-256 authenticated combination key. |
| 1363 | */ |
Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1364 | if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) { |
Marcel Holtmann | 40b552a | 2014-03-19 14:10:25 -0700 | [diff] [blame] | 1365 | if (!hci_conn_sc_enabled(conn) || |
| 1366 | !test_bit(HCI_CONN_AES_CCM, &conn->flags) || |
| 1367 | conn->key_type != HCI_LK_AUTH_COMBINATION_P256) |
| 1368 | return 0; |
| 1369 | } |
| 1370 | |
Luiz Augusto von Dentz | 8746f13 | 2020-05-20 14:20:14 -0700 | [diff] [blame] | 1371 | /* AES encryption is required for Level 4: |
| 1372 | * |
| 1373 | * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C |
| 1374 | * page 1319: |
| 1375 | * |
| 1376 | * 128-bit equivalent strength for link and encryption keys |
| 1377 | * required using FIPS approved algorithms (E0 not allowed, |
| 1378 | * SAFER+ not allowed, and P-192 not allowed; encryption key |
| 1379 | * not shortened) |
| 1380 | */ |
| 1381 | if (conn->sec_level == BT_SECURITY_FIPS && |
| 1382 | !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { |
| 1383 | bt_dev_err(conn->hdev, |
| 1384 | "Invalid security: Missing AES-CCM usage"); |
| 1385 | return 0; |
| 1386 | } |
| 1387 | |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1388 | if (hci_conn_ssp_enabled(conn) && |
| 1389 | !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 | return 0; |
| 1391 | |
| 1392 | return 1; |
| 1393 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | |
| 1395 | /* Authenticate remote device */ |
| 1396 | static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) |
| 1397 | { |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1398 | BT_DBG("hcon %p", conn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | |
Johan Hedberg | 765c2a9 | 2011-01-19 12:06:52 +0530 | [diff] [blame] | 1400 | if (conn->pending_sec_level > sec_level) |
| 1401 | sec_level = conn->pending_sec_level; |
| 1402 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | if (sec_level > conn->sec_level) |
Johan Hedberg | 765c2a9 | 2011-01-19 12:06:52 +0530 | [diff] [blame] | 1404 | conn->pending_sec_level = sec_level; |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1405 | else if (test_bit(HCI_CONN_AUTH, &conn->flags)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1406 | return 1; |
| 1407 | |
Johan Hedberg | 65cf686 | 2011-01-19 12:06:49 +0530 | [diff] [blame] | 1408 | /* Make sure we preserve an existing MITM requirement */ |
| 1409 | auth_type |= (conn->auth_type & 0x01); |
| 1410 | |
Marcel Holtmann | 96a3183 | 2009-02-12 16:23:03 +0100 | [diff] [blame] | 1411 | conn->auth_type = auth_type; |
| 1412 | |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1413 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 | struct hci_cp_auth_requested cp; |
Peter Hurley | b7d05ba | 2012-01-13 15:11:30 +0100 | [diff] [blame] | 1415 | |
YOSHIFUJI Hideaki | aca3192 | 2007-03-25 20:12:50 -0700 | [diff] [blame] | 1416 | cp.handle = cpu_to_le16(conn->handle); |
Marcel Holtmann | 40be492 | 2008-07-14 20:13:50 +0200 | [diff] [blame] | 1417 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, |
Gustavo Padovan | 5974e4c | 2012-05-17 00:36:25 -0300 | [diff] [blame] | 1418 | sizeof(cp), &cp); |
Johan Hedberg | 09da1f3 | 2014-04-11 12:02:32 -0700 | [diff] [blame] | 1419 | |
| 1420 | /* If we're already encrypted, set the REAUTH_PEND flag, |
| 1421 | * otherwise set the ENCRYPT_PEND. |
| 1422 | */ |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1423 | if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1424 | set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); |
Johan Hedberg | 09da1f3 | 2014-04-11 12:02:32 -0700 | [diff] [blame] | 1425 | else |
| 1426 | set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | } |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1428 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | return 0; |
| 1430 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1431 | |
Randy Dunlap | bb6d689 | 2020-09-17 21:35:18 -0700 | [diff] [blame] | 1432 | /* Encrypt the link */ |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1433 | static void hci_conn_encrypt(struct hci_conn *conn) |
| 1434 | { |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1435 | BT_DBG("hcon %p", conn); |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1436 | |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1437 | if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1438 | struct hci_cp_set_conn_encrypt cp; |
| 1439 | cp.handle = cpu_to_le16(conn->handle); |
| 1440 | cp.encrypt = 0x01; |
| 1441 | hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), |
Gustavo Padovan | 5974e4c | 2012-05-17 00:36:25 -0300 | [diff] [blame] | 1442 | &cp); |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1443 | } |
| 1444 | } |
| 1445 | |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1446 | /* Enable security */ |
Johan Hedberg | e7cafc4 | 2014-07-17 15:35:38 +0300 | [diff] [blame] | 1447 | int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, |
| 1448 | bool initiator) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1449 | { |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1450 | BT_DBG("hcon %p", conn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | |
Vinicius Costa Gomes | d8343f1 | 2012-08-23 21:32:44 -0300 | [diff] [blame] | 1452 | if (conn->type == LE_LINK) |
| 1453 | return smp_conn_security(conn, sec_level); |
| 1454 | |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1455 | /* For SDP we don't need the link key. */ |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1456 | if (sec_level == BT_SECURITY_SDP) |
| 1457 | return 1; |
| 1458 | |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1459 | /* For non-2.1 devices and low security level we don't need the link |
| 1460 | key. */ |
Johan Hedberg | aa64a8b | 2012-01-18 21:33:12 +0200 | [diff] [blame] | 1461 | if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn)) |
Marcel Holtmann | 3fdca1e | 2009-04-28 09:04:55 -0700 | [diff] [blame] | 1462 | return 1; |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1463 | |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1464 | /* For other security levels we need the link key. */ |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1465 | if (!test_bit(HCI_CONN_AUTH, &conn->flags)) |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1466 | goto auth; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | |
Marcel Holtmann | 7b5a924 | 2014-01-15 22:37:39 -0800 | [diff] [blame] | 1468 | /* An authenticated FIPS approved combination key has sufficient |
| 1469 | * security for security level 4. */ |
| 1470 | if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 && |
| 1471 | sec_level == BT_SECURITY_FIPS) |
| 1472 | goto encrypt; |
| 1473 | |
| 1474 | /* An authenticated combination key has sufficient security for |
| 1475 | security level 3. */ |
| 1476 | if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 || |
| 1477 | conn->key_type == HCI_LK_AUTH_COMBINATION_P256) && |
| 1478 | sec_level == BT_SECURITY_HIGH) |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1479 | goto encrypt; |
| 1480 | |
| 1481 | /* An unauthenticated combination key has sufficient security for |
| 1482 | security levels 1 and 2. */ |
Marcel Holtmann | 66138ce | 2014-01-10 02:07:20 -0800 | [diff] [blame] | 1483 | if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 || |
| 1484 | conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) && |
Gustavo Padovan | 5974e4c | 2012-05-17 00:36:25 -0300 | [diff] [blame] | 1485 | (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW)) |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1486 | goto encrypt; |
| 1487 | |
| 1488 | /* A combination key always has sufficient security for security |
| 1489 | levels 1 and 2. A high security level additionally requires that |
| 1490 | the combination key was generated using the maximum PIN code |
| 1491 | length (16). This applies to pre-2.1 units. */ |
| 1492 | if (conn->key_type == HCI_LK_COMBINATION && |
Marcel Holtmann | 7b5a924 | 2014-01-15 22:37:39 -0800 | [diff] [blame] | 1493 | (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW || |
| 1494 | conn->pin_length == 16)) |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1495 | goto encrypt; |
| 1496 | |
| 1497 | auth: |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1498 | if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | return 0; |
| 1500 | |
Johan Hedberg | 977f8fc | 2014-07-17 15:35:39 +0300 | [diff] [blame] | 1501 | if (initiator) |
| 1502 | set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); |
| 1503 | |
Luiz Augusto von Dentz | 6fdf658 | 2011-06-13 15:37:35 +0300 | [diff] [blame] | 1504 | if (!hci_conn_auth(conn, sec_level, auth_type)) |
| 1505 | return 0; |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1506 | |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1507 | encrypt: |
Marcel Holtmann | 693cd8c | 2019-06-22 15:47:01 +0200 | [diff] [blame] | 1508 | if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) { |
| 1509 | /* Ensure that the encryption key size has been read, |
| 1510 | * otherwise stall the upper layer responses. |
| 1511 | */ |
| 1512 | if (!conn->enc_key_size) |
| 1513 | return 0; |
| 1514 | |
| 1515 | /* Nothing else needed, all requirements are met */ |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1516 | return 1; |
Marcel Holtmann | 693cd8c | 2019-06-22 15:47:01 +0200 | [diff] [blame] | 1517 | } |
Waldemar Rymarkiewicz | 13d3931 | 2011-04-28 12:07:55 +0200 | [diff] [blame] | 1518 | |
| 1519 | hci_conn_encrypt(conn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | return 0; |
| 1521 | } |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1522 | EXPORT_SYMBOL(hci_conn_security); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | |
Waldemar Rymarkiewicz | b3b1b06 | 2011-05-06 09:42:31 +0200 | [diff] [blame] | 1524 | /* Check secure link requirement */ |
| 1525 | int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level) |
| 1526 | { |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1527 | BT_DBG("hcon %p", conn); |
Waldemar Rymarkiewicz | b3b1b06 | 2011-05-06 09:42:31 +0200 | [diff] [blame] | 1528 | |
Marcel Holtmann | 9cb2e03 | 2014-02-01 11:32:25 -0800 | [diff] [blame] | 1529 | /* Accept if neither high nor FIPS security level is required */ |
| 1530 | if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS) |
Waldemar Rymarkiewicz | b3b1b06 | 2011-05-06 09:42:31 +0200 | [diff] [blame] | 1531 | return 1; |
| 1532 | |
Marcel Holtmann | 9cb2e03 | 2014-02-01 11:32:25 -0800 | [diff] [blame] | 1533 | /* Accept if secure or higher security level is already present */ |
| 1534 | if (conn->sec_level == BT_SECURITY_HIGH || |
| 1535 | conn->sec_level == BT_SECURITY_FIPS) |
| 1536 | return 1; |
| 1537 | |
| 1538 | /* Reject a non-secure link */ |
| 1539 | return 0; |
Waldemar Rymarkiewicz | b3b1b06 | 2011-05-06 09:42:31 +0200 | [diff] [blame] | 1540 | } |
| 1541 | EXPORT_SYMBOL(hci_conn_check_secure); |
| 1542 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | /* Switch role */ |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1544 | int hci_conn_switch_role(struct hci_conn *conn, __u8 role) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | { |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1546 | BT_DBG("hcon %p", conn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | |
Johan Hedberg | 40bef30 | 2014-07-16 11:42:27 +0300 | [diff] [blame] | 1548 | if (role == conn->role) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | return 1; |
| 1550 | |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1551 | if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | struct hci_cp_switch_role cp; |
| 1553 | bacpy(&cp.bdaddr, &conn->dst); |
| 1554 | cp.role = role; |
Marcel Holtmann | a9de924 | 2007-10-20 13:33:56 +0200 | [diff] [blame] | 1555 | hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | } |
Marcel Holtmann | 8c1b235 | 2009-01-15 21:58:04 +0100 | [diff] [blame] | 1557 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1558 | return 0; |
| 1559 | } |
| 1560 | EXPORT_SYMBOL(hci_conn_switch_role); |
| 1561 | |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1562 | /* Enter active mode */ |
Jaikumar Ganesh | 14b12d0 | 2011-05-23 18:06:04 -0700 | [diff] [blame] | 1563 | void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1564 | { |
| 1565 | struct hci_dev *hdev = conn->hdev; |
| 1566 | |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1567 | BT_DBG("hcon %p mode %d", conn, conn->mode); |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1568 | |
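| | /* Only request to exit sniff mode if the link is actually sniffing |
| | * and either power save is enabled for it or the caller forces |
| | * active mode; the idle timer is (re)armed in any case. |
| | */ |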
Jaikumar Ganesh | 14b12d0 | 2011-05-23 18:06:04 -0700 | [diff] [blame] | 1569 | if (conn->mode != HCI_CM_SNIFF) |
| 1570 | goto timer; |
| 1571 | |
Johan Hedberg | 58a681e | 2012-01-16 06:47:28 +0200 | [diff] [blame] | 1572 | if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active) |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1573 | goto timer; |
| 1574 | |
Johan Hedberg | 51a8efd | 2012-01-16 06:10:31 +0200 | [diff] [blame] | 1575 | if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1576 | struct hci_cp_exit_sniff_mode cp; |
YOSHIFUJI Hideaki | aca3192 | 2007-03-25 20:12:50 -0700 | [diff] [blame] | 1577 | cp.handle = cpu_to_le16(conn->handle); |
Marcel Holtmann | a9de924 | 2007-10-20 13:33:56 +0200 | [diff] [blame] | 1578 | hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1579 | } |
| 1580 | |
| 1581 | timer: |
| 1582 | if (hdev->idle_timeout > 0) |
Johan Hedberg | a74a84f | 2013-10-16 18:11:40 +0300 | [diff] [blame] | 1583 | queue_delayed_work(hdev->workqueue, &conn->idle_work, |
| 1584 | msecs_to_jiffies(hdev->idle_timeout)); |
Marcel Holtmann | 04837f6 | 2006-07-03 10:02:33 +0200 | [diff] [blame] | 1585 | } |
| 1586 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1587 | /* Drop all connection on the device */ |
| 1588 | void hci_conn_hash_flush(struct hci_dev *hdev) |
| 1589 | { |
| 1590 | struct hci_conn_hash *h = &hdev->conn_hash; |
Andrei Emeltchenko | 3c4e0df0 | 2012-02-02 10:32:17 +0200 | [diff] [blame] | 1591 | struct hci_conn *c, *n; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | |
| 1593 | BT_DBG("hdev %s", hdev->name); |
| 1594 | |
Andrei Emeltchenko | 3c4e0df0 | 2012-02-02 10:32:17 +0200 | [diff] [blame] | 1595 | list_for_each_entry_safe(c, n, &h->list, list) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | c->state = BT_CLOSED; |
| 1597 | |
Johan Hedberg | 3a6d576 | 2015-02-18 14:53:58 +0200 | [diff] [blame] | 1598 | hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | hci_conn_del(c); |
| 1600 | } |
| 1601 | } |
| 1602 | |
Marcel Holtmann | a9de924 | 2007-10-20 13:33:56 +0200 | [diff] [blame] | 1603 | /* Check pending connect attempts */ |
| 1604 | void hci_conn_check_pending(struct hci_dev *hdev) |
| 1605 | { |
| 1606 | struct hci_conn *conn; |
| 1607 | |
| 1608 | BT_DBG("hdev %s", hdev->name); |
| 1609 | |
| 1610 | hci_dev_lock(hdev); |
| 1611 | |
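| | /* If another ACL connection attempt is still queued in BT_CONNECT2 |
| | * state, issue its create-connection now. |
| | */ |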
| 1612 | conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); |
| 1613 | if (conn) |
Vinicius Costa Gomes | 1aef866 | 2012-07-27 19:32:55 -0300 | [diff] [blame] | 1614 | hci_acl_create_connection(conn); |
Marcel Holtmann | a9de924 | 2007-10-20 13:33:56 +0200 | [diff] [blame] | 1615 | |
| 1616 | hci_dev_unlock(hdev); |
| 1617 | } |
| 1618 | |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1619 | static u32 get_link_mode(struct hci_conn *conn) |
| 1620 | { |
| 1621 | u32 link_mode = 0; |
| 1622 | |
Johan Hedberg | 40bef30 | 2014-07-16 11:42:27 +0300 | [diff] [blame] | 1623 | if (conn->role == HCI_ROLE_MASTER) |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1624 | link_mode |= HCI_LM_MASTER; |
| 1625 | |
| 1626 | if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) |
| 1627 | link_mode |= HCI_LM_ENCRYPT; |
| 1628 | |
| 1629 | if (test_bit(HCI_CONN_AUTH, &conn->flags)) |
| 1630 | link_mode |= HCI_LM_AUTH; |
| 1631 | |
| 1632 | if (test_bit(HCI_CONN_SECURE, &conn->flags)) |
| 1633 | link_mode |= HCI_LM_SECURE; |
| 1634 | |
| 1635 | if (test_bit(HCI_CONN_FIPS, &conn->flags)) |
| 1636 | link_mode |= HCI_LM_FIPS; |
| 1637 | |
| 1638 | return link_mode; |
| 1639 | } |
| 1640 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | int hci_get_conn_list(void __user *arg) |
| 1642 | { |
Gustavo Padovan | fc5fef6 | 2012-05-23 04:04:19 -0300 | [diff] [blame] | 1643 | struct hci_conn *c; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | struct hci_conn_list_req req, *cl; |
| 1645 | struct hci_conn_info *ci; |
| 1646 | struct hci_dev *hdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1647 | int n = 0, size, err; |
| 1648 | |
| 1649 | if (copy_from_user(&req, arg, sizeof(req))) |
| 1650 | return -EFAULT; |
| 1651 | |
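| | /* Reject requests with no entries or with so many that the buffer |
| | * allocated below would exceed roughly two pages. |
| | */ |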
| 1652 | if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci)) |
| 1653 | return -EINVAL; |
| 1654 | |
| 1655 | size = sizeof(req) + req.conn_num * sizeof(*ci); |
| 1656 | |
Andrei Emeltchenko | 70f23020 | 2010-12-01 16:58:25 +0200 | [diff] [blame] | 1657 | cl = kmalloc(size, GFP_KERNEL); |
| 1658 | if (!cl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1659 | return -ENOMEM; |
| 1660 | |
Andrei Emeltchenko | 70f23020 | 2010-12-01 16:58:25 +0200 | [diff] [blame] | 1661 | hdev = hci_dev_get(req.dev_id); |
| 1662 | if (!hdev) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | kfree(cl); |
| 1664 | return -ENODEV; |
| 1665 | } |
| 1666 | |
| 1667 | ci = cl->conn_info; |
| 1668 | |
Gustavo F. Padovan | 09fd0de | 2011-06-17 13:03:21 -0300 | [diff] [blame] | 1669 | hci_dev_lock(hdev); |
Luiz Augusto von Dentz | 8035ded | 2011-11-01 10:58:56 +0200 | [diff] [blame] | 1670 | list_for_each_entry(c, &hdev->conn_hash.list, list) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | bacpy(&(ci + n)->bdaddr, &c->dst); |
| 1672 | (ci + n)->handle = c->handle; |
| 1673 | (ci + n)->type = c->type; |
| 1674 | (ci + n)->out = c->out; |
| 1675 | (ci + n)->state = c->state; |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1676 | (ci + n)->link_mode = get_link_mode(c); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | if (++n >= req.conn_num) |
| 1678 | break; |
| 1679 | } |
Gustavo F. Padovan | 09fd0de | 2011-06-17 13:03:21 -0300 | [diff] [blame] | 1680 | hci_dev_unlock(hdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | |
| 1682 | cl->dev_id = hdev->id; |
| 1683 | cl->conn_num = n; |
| 1684 | size = sizeof(req) + n * sizeof(*ci); |
| 1685 | |
| 1686 | hci_dev_put(hdev); |
| 1687 | |
| 1688 | err = copy_to_user(arg, cl, size); |
| 1689 | kfree(cl); |
| 1690 | |
| 1691 | return err ? -EFAULT : 0; |
| 1692 | } |
| 1693 | |
| 1694 | int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) |
| 1695 | { |
| 1696 | struct hci_conn_info_req req; |
| 1697 | struct hci_conn_info ci; |
| 1698 | struct hci_conn *conn; |
| 1699 | char __user *ptr = arg + sizeof(req); |
| 1700 | |
| 1701 | if (copy_from_user(&req, arg, sizeof(req))) |
| 1702 | return -EFAULT; |
| 1703 | |
Gustavo F. Padovan | 09fd0de | 2011-06-17 13:03:21 -0300 | [diff] [blame] | 1704 | hci_dev_lock(hdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); |
| 1706 | if (conn) { |
| 1707 | bacpy(&ci.bdaddr, &conn->dst); |
| 1708 | ci.handle = conn->handle; |
| 1709 | ci.type = conn->type; |
| 1710 | ci.out = conn->out; |
| 1711 | ci.state = conn->state; |
Johan Hedberg | 4dae279 | 2014-06-24 17:03:50 +0300 | [diff] [blame] | 1712 | ci.link_mode = get_link_mode(conn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1713 | } |
Gustavo F. Padovan | 09fd0de | 2011-06-17 13:03:21 -0300 | [diff] [blame] | 1714 | hci_dev_unlock(hdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1715 | |
| 1716 | if (!conn) |
| 1717 | return -ENOENT; |
| 1718 | |
| 1719 | return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; |
| 1720 | } |
Marcel Holtmann | 40be492 | 2008-07-14 20:13:50 +0200 | [diff] [blame] | 1721 | |
| 1722 | int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) |
| 1723 | { |
| 1724 | struct hci_auth_info_req req; |
| 1725 | struct hci_conn *conn; |
| 1726 | |
| 1727 | if (copy_from_user(&req, arg, sizeof(req))) |
| 1728 | return -EFAULT; |
| 1729 | |
Gustavo F. Padovan | 09fd0de | 2011-06-17 13:03:21 -0300 | [diff] [blame] | 1730 | hci_dev_lock(hdev); |
Marcel Holtmann | 40be492 | 2008-07-14 20:13:50 +0200 | [diff] [blame] | 1731 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); |
| 1732 | if (conn) |
| 1733 | req.type = conn->auth_type; |
Gustavo F. Padovan | 09fd0de | 2011-06-17 13:03:21 -0300 | [diff] [blame] | 1734 | hci_dev_unlock(hdev); |
Marcel Holtmann | 40be492 | 2008-07-14 20:13:50 +0200 | [diff] [blame] | 1735 | |
| 1736 | if (!conn) |
| 1737 | return -ENOENT; |
| 1738 | |
| 1739 | return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; |
| 1740 | } |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1741 | |
| 1742 | struct hci_chan *hci_chan_create(struct hci_conn *conn) |
| 1743 | { |
| 1744 | struct hci_dev *hdev = conn->hdev; |
| 1745 | struct hci_chan *chan; |
| 1746 | |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1747 | BT_DBG("%s hcon %p", hdev->name, conn); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1748 | |
Johan Hedberg | f94b665 | 2014-08-18 00:41:44 +0300 | [diff] [blame] | 1749 | if (test_bit(HCI_CONN_DROP, &conn->flags)) { |
| 1750 | BT_DBG("Refusing to create new hci_chan"); |
| 1751 | return NULL; |
| 1752 | } |
| 1753 | |
Johan Hedberg | 27f70f3 | 2014-07-21 10:50:06 +0300 | [diff] [blame] | 1754 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1755 | if (!chan) |
| 1756 | return NULL; |
| 1757 | |
Johan Hedberg | 6c388d3 | 2014-08-18 00:41:42 +0300 | [diff] [blame] | 1758 | chan->conn = hci_conn_get(conn); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1759 | skb_queue_head_init(&chan->data_q); |
Mat Martineau | 168df8e | 2012-10-23 15:24:13 -0700 | [diff] [blame] | 1760 | chan->state = BT_CONNECTED; |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1761 | |
Gustavo F. Padovan | 8192ede | 2011-12-14 15:08:48 -0200 | [diff] [blame] | 1762 | list_add_rcu(&chan->list, &conn->chan_list); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1763 | |
| 1764 | return chan; |
| 1765 | } |
| 1766 | |
Andrei Emeltchenko | 9472007 | 2012-09-06 15:05:43 +0300 | [diff] [blame] | 1767 | void hci_chan_del(struct hci_chan *chan) |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1768 | { |
| 1769 | struct hci_conn *conn = chan->conn; |
| 1770 | struct hci_dev *hdev = conn->hdev; |
| 1771 | |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1772 | BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1773 | |
Gustavo F. Padovan | 8192ede | 2011-12-14 15:08:48 -0200 | [diff] [blame] | 1774 | list_del_rcu(&chan->list); |
| 1775 | |
| 1776 | synchronize_rcu(); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1777 | |
Johan Hedberg | bcbb655 | 2014-08-18 20:33:27 +0300 | [diff] [blame] | 1778 | /* Prevent new hci_chans from being created for this hci_conn */ |
Johan Hedberg | f94b665 | 2014-08-18 00:41:44 +0300 | [diff] [blame] | 1779 | set_bit(HCI_CONN_DROP, &conn->flags); |
Johan Hedberg | b3ff670 | 2014-08-18 00:41:43 +0300 | [diff] [blame] | 1780 | |
Johan Hedberg | 6c388d3 | 2014-08-18 00:41:42 +0300 | [diff] [blame] | 1781 | hci_conn_put(conn); |
Andrei Emeltchenko | e9b0274 | 2012-10-25 15:20:51 +0300 | [diff] [blame] | 1782 | |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1783 | skb_queue_purge(&chan->data_q); |
| 1784 | kfree(chan); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1785 | } |
| 1786 | |
Gustavo F. Padovan | 2c33c06 | 2011-12-14 13:02:51 -0200 | [diff] [blame] | 1787 | void hci_chan_list_flush(struct hci_conn *conn) |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1788 | { |
Andrei Emeltchenko | 2a5a5ec | 2012-02-02 10:32:18 +0200 | [diff] [blame] | 1789 | struct hci_chan *chan, *n; |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1790 | |
Andrei Emeltchenko | 38b3fef | 2012-06-15 11:50:28 +0300 | [diff] [blame] | 1791 | BT_DBG("hcon %p", conn); |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1792 | |
Andrei Emeltchenko | 2a5a5ec | 2012-02-02 10:32:18 +0200 | [diff] [blame] | 1793 | list_for_each_entry_safe(chan, n, &conn->chan_list, list) |
Luiz Augusto von Dentz | 73d80de | 2011-11-02 15:52:01 +0200 | [diff] [blame] | 1794 | hci_chan_del(chan); |
| 1795 | } |
Andrei Emeltchenko | 42c4e53 | 2012-10-10 17:38:28 +0300 | [diff] [blame] | 1796 | |
| 1797 | static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon, |
| 1798 | __u16 handle) |
| 1799 | { |
| 1800 | struct hci_chan *hchan; |
| 1801 | |
| 1802 | list_for_each_entry(hchan, &hcon->chan_list, list) { |
| 1803 | if (hchan->handle == handle) |
| 1804 | return hchan; |
| 1805 | } |
| 1806 | |
| 1807 | return NULL; |
| 1808 | } |
| 1809 | |
| 1810 | struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle) |
| 1811 | { |
| 1812 | struct hci_conn_hash *h = &hdev->conn_hash; |
| 1813 | struct hci_conn *hcon; |
| 1814 | struct hci_chan *hchan = NULL; |
| 1815 | |
| 1816 | rcu_read_lock(); |
| 1817 | |
| 1818 | list_for_each_entry_rcu(hcon, &h->list, list) { |
| 1819 | hchan = __hci_chan_lookup_handle(hcon, handle); |
| 1820 | if (hchan) |
| 1821 | break; |
| 1822 | } |
| 1823 | |
| 1824 | rcu_read_unlock(); |
| 1825 | |
| 1826 | return hchan; |
| 1827 | } |
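A hedged sketch of how the handle lookup above might be consumed; example_handle_rx() and its drop-on-miss policy are assumptions for illustration only, and a real caller must still ensure the channel cannot be deleted concurrently, since the RCU read side ends inside hci_chan_lookup_handle().

/* Illustrative only, not kernel API: route an incoming frame to the
 * hci_chan owning the given handle, dropping it when nothing matches.
 */
static void example_handle_rx(struct hci_dev *hdev, __u16 handle,
			      struct sk_buff *skb)
{
	struct hci_chan *chan = hci_chan_lookup_handle(hdev, handle);

	if (!chan) {
		kfree_skb(skb);		/* no channel owns this handle */
		return;
	}

	skb_queue_tail(&chan->data_q, skb);	/* hand off to the channel */
}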
Luiz Augusto von Dentz | eab2404 | 2020-02-14 10:08:57 -0800 | [diff] [blame] | 1828 | |
| 1829 | u32 hci_conn_get_phy(struct hci_conn *conn) |
| 1830 | { |
| 1831 | u32 phys = 0; |
| 1832 | |
| 1833 | hci_dev_lock(conn->hdev); |
| 1834 | |
| 1835 | /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471: |
| 1836 | * Table 6.2: Packets defined for synchronous, asynchronous, and |
| 1837 | * CSB logical transport types. |
| 1838 | */ |
| 1839 | switch (conn->type) { |
| 1840 | case SCO_LINK: |
| 1841 | /* SCO logical transport (1 Mb/s): |
| 1842 | * HV1, HV2, HV3 and DV. |
| 1843 | */ |
| 1844 | phys |= BT_PHY_BR_1M_1SLOT; |
| 1845 | |
| 1846 | break; |
| 1847 | |
| 1848 | case ACL_LINK: |
| 1849 | /* ACL logical transport (1 Mb/s) ptt=0: |
| 1850 | * DM1, DH1, DM3, DH3, DM5 and DH5. |
| 1851 | */ |
| 1852 | phys |= BT_PHY_BR_1M_1SLOT; |
| 1853 | |
| 1854 | if (conn->pkt_type & (HCI_DM3 | HCI_DH3)) |
| 1855 | phys |= BT_PHY_BR_1M_3SLOT; |
| 1856 | |
| 1857 | if (conn->pkt_type & (HCI_DM5 | HCI_DH5)) |
| 1858 | phys |= BT_PHY_BR_1M_5SLOT; |
| 1859 | |
| 1860 | /* ACL logical transport (2 Mb/s) ptt=1: |
| 1861 | * 2-DH1, 2-DH3 and 2-DH5. |
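| | * |
| | * In the HCI packet-type encoding the EDR bits are "shall |
| | * not be used" flags, so a cleared bit marks the packet |
| | * type as allowed; hence the negated tests below. |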
| 1862 | */ |
| 1863 | if (!(conn->pkt_type & HCI_2DH1)) |
| 1864 | phys |= BT_PHY_EDR_2M_1SLOT; |
| 1865 | |
| 1866 | if (!(conn->pkt_type & HCI_2DH3)) |
| 1867 | phys |= BT_PHY_EDR_2M_3SLOT; |
| 1868 | |
| 1869 | if (!(conn->pkt_type & HCI_2DH5)) |
| 1870 | phys |= BT_PHY_EDR_2M_5SLOT; |
| 1871 | |
| 1872 | /* ACL logical transport (3 Mb/s) ptt=1: |
| 1873 | * 3-DH1, 3-DH3 and 3-DH5. |
| 1874 | */ |
| 1875 | if (!(conn->pkt_type & HCI_3DH1)) |
| 1876 | phys |= BT_PHY_EDR_3M_1SLOT; |
| 1877 | |
| 1878 | if (!(conn->pkt_type & HCI_3DH3)) |
| 1879 | phys |= BT_PHY_EDR_3M_3SLOT; |
| 1880 | |
| 1881 | if (!(conn->pkt_type & HCI_3DH5)) |
| 1882 | phys |= BT_PHY_EDR_3M_5SLOT; |
| 1883 | |
| 1884 | break; |
| 1885 | |
| 1886 | case ESCO_LINK: |
| 1887 | /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */ |
| 1888 | phys |= BT_PHY_BR_1M_1SLOT; |
| 1889 | |
| 1890 | if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5))) |
| 1891 | phys |= BT_PHY_BR_1M_3SLOT; |
| 1892 | |
| 1893 | /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */ |
| 1894 | if (!(conn->pkt_type & ESCO_2EV3)) |
| 1895 | phys |= BT_PHY_EDR_2M_1SLOT; |
| 1896 | |
| 1897 | if (!(conn->pkt_type & ESCO_2EV5)) |
| 1898 | phys |= BT_PHY_EDR_2M_3SLOT; |
| 1899 | |
| 1900 | /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */ |
| 1901 | if (!(conn->pkt_type & ESCO_3EV3)) |
| 1902 | phys |= BT_PHY_EDR_3M_1SLOT; |
| 1903 | |
| 1904 | if (!(conn->pkt_type & ESCO_3EV5)) |
| 1905 | phys |= BT_PHY_EDR_3M_3SLOT; |
| 1906 | |
| 1907 | break; |
| 1908 | |
| 1909 | case LE_LINK: |
| 1910 | if (conn->le_tx_phy & HCI_LE_SET_PHY_1M) |
| 1911 | phys |= BT_PHY_LE_1M_TX; |
| 1912 | |
| 1913 | if (conn->le_rx_phy & HCI_LE_SET_PHY_1M) |
| 1914 | phys |= BT_PHY_LE_1M_RX; |
| 1915 | |
| 1916 | if (conn->le_tx_phy & HCI_LE_SET_PHY_2M) |
| 1917 | phys |= BT_PHY_LE_2M_TX; |
| 1918 | |
| 1919 | if (conn->le_rx_phy & HCI_LE_SET_PHY_2M) |
| 1920 | phys |= BT_PHY_LE_2M_RX; |
| 1921 | |
| 1922 | if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED) |
| 1923 | phys |= BT_PHY_LE_CODED_TX; |
| 1924 | |
| 1925 | if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED) |
| 1926 | phys |= BT_PHY_LE_CODED_RX; |
| 1927 | |
| 1928 | break; |
| 1929 | } |
| 1930 | |
| 1931 | hci_dev_unlock(conn->hdev); |
| 1932 | |
| 1933 | return phys; |
| 1934 | } |
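A small, hedged example of consuming the mask returned above; example_phy_supports_2m() is a hypothetical helper, not an existing kernel function.

/* Illustrative only: report whether the connection currently advertises any
 * 2 Mb/s capable PHY, BR/EDR or LE.
 */
static bool example_phy_supports_2m(struct hci_conn *conn)
{
	u32 phys = hci_conn_get_phy(conn);

	return phys & (BT_PHY_EDR_2M_1SLOT | BT_PHY_EDR_2M_3SLOT |
		       BT_PHY_EDR_2M_5SLOT | BT_PHY_LE_2M_TX |
		       BT_PHY_LE_2M_RX);
}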