/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

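/* Initialize a request against the given device with an empty command
 * queue and no pending error.
 */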
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

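/* Submit a built request: attach the completion callback to the last
 * queued command, splice all commands onto the device command queue and
 * schedule the command work.
 */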
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

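/* Completion handler for the synchronous request helpers: store the
 * result, keep a reference to the response skb if there is one and wake
 * up the waiter on req_wait_q.
 */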
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

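/* Send a single HCI command and wait for its completion (or the given
 * event). Returns the response skb on success and an ERR_PTR on failure
 * or timeout.
 */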
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

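/* Allocate and fill an skb with the HCI command header and (optional)
 * parameter payload, tagged as an HCI command packet.
 */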
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

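/* Queue page scan activity and type updates: interlaced scanning with a
 * 160 msec interval when fast connectable is enabled, the controller
 * defaults otherwise. Commands are only queued for values that actually
 * change.
 */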
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices
		 * to be scanned for and no ADV monitors, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

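/* Append the registered 16-bit service class UUIDs to an EIR buffer and
 * return the new write position. When space runs out the list is marked
 * EIR_UUID16_SOME instead of EIR_UUID16_ALL.
 */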
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

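/* Build the extended inquiry response data: local name (shortened if
 * necessary), TX power, Device ID and the 16/32/128-bit service class
 * UUID lists.
 */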
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

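/* Queue a Write Extended Inquiry Response command, but only when the
 * controller is powered, EIR and SSP are usable and the freshly generated
 * data differs from the cached copy.
 */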
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

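/* Bring the controller white list in sync with the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 when scanning has to accept all
 * advertisements.
 */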
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Once controller offloading of advertisement monitors is in place,
	 * this condition should also check for support of the MSFT extension.
	 * If suspend is ongoing, whitelist should be the default to prevent
	 * waking by random advertisements.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

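/* Queue the commands to configure and enable LE scanning, using the
 * extended scan commands (one set of parameters per supported PHY) when
 * available and the legacy commands otherwise. Controller based address
 * resolution is enabled first when requested via addr_resolv.
 */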
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the controller supports the ext scan
	 * param and ext scan enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable the controller based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

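/* Program a connection setup event filter for every whitelisted device
 * flagged as able to wake the host, and enable page scanning only if at
 * least one such filter was installed.
 */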
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

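/* Return the advertising flags of the given instance. Instance 0 derives
 * its flags from the global HCI settings, other instances carry their own
 * flags and an unknown instance yields 0.
 */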
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

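/* Queue legacy advertising parameter and enable commands. Connectable
 * advertising uses ADV_IND, otherwise ADV_SCAN_IND or ADV_NONCONN_IND is
 * chosen based on the scan response length, with the fast advertising
 * intervals unless the device is in general discoverable mode.
 */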
Johan Hedbergf2252572015-11-18 12:49:20 +02001443void __hci_req_enable_advertising(struct hci_request *req)
1444{
1445 struct hci_dev *hdev = req->hdev;
1446 struct hci_cp_le_set_adv_param cp;
1447 u8 own_addr_type, enable = 0x01;
1448 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301449 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001450 u32 flags;
1451
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001452 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1453
1454 /* If the "connectable" instance flag was not set, then choose between
1455 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1456 */
1457 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1458 mgmt_get_connectable(hdev);
1459
1460 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001461 return;
1462
1463 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1464 __hci_req_disable_advertising(req);
1465
1466 /* Clear the HCI_LE_ADV bit temporarily so that the
1467 * hci_update_random_address knows that it's safe to go ahead
1468 * and write a new random address. The flag will be set back on
1469 * as soon as the SET_ADV_ENABLE HCI command completes.
1470 */
1471 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1472
Johan Hedbergf2252572015-11-18 12:49:20 +02001473 /* Set require_privacy to true only when non-connectable
1474 * advertising is used. In that case it is fine to use a
1475 * non-resolvable private address.
1476 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001477 if (hci_update_random_address(req, !connectable,
1478 adv_use_rpa(hdev, flags),
1479 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001480 return;
1481
1482 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001483
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301484 if (connectable) {
Johan Hedbergf2252572015-11-18 12:49:20 +02001485 cp.type = LE_ADV_IND;
Johan Hedbergf2252572015-11-18 12:49:20 +02001486
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301487 adv_min_interval = hdev->le_adv_min_interval;
1488 adv_max_interval = hdev->le_adv_max_interval;
1489 } else {
1490 if (get_cur_adv_instance_scan_rsp_len(hdev))
1491 cp.type = LE_ADV_SCAN_IND;
1492 else
1493 cp.type = LE_ADV_NONCONN_IND;
1494
1495 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1496 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1497 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1498 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1499 } else {
1500 adv_min_interval = hdev->le_adv_min_interval;
1501 adv_max_interval = hdev->le_adv_max_interval;
1502 }
1503 }
1504
1505 cp.min_interval = cpu_to_le16(adv_min_interval);
1506 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001507 cp.own_address_type = own_addr_type;
1508 cp.channel_map = hdev->le_adv_channel_map;
1509
1510 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1511
1512 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1513}
1514
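/* A minimal usage sketch, mirroring hci_req_update_adv_data() further
 * below: batch the advertising commands into a request and run it
 * asynchronously. The function name is illustrative only.
 */
static inline int example_enable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_enable_advertising(&req);

	/* No completion callback is needed for this sketch */
	return hci_req_run(&req, NULL);
}
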
Michał Narajowskif61851f2016-10-19 10:20:27 +02001515u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001516{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001517 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001518 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001519
Michał Narajowskif61851f2016-10-19 10:20:27 +02001520 /* no space left for name (+ NULL + type + len) */
1521 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1522 return ad_len;
1523
1524 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001525 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001526 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001527 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001528 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001529
Michał Narajowskif61851f2016-10-19 10:20:27 +02001530 /* use short name if present */
1531 short_len = strlen(hdev->short_name);
1532 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001533 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001534 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001535
Michał Narajowskif61851f2016-10-19 10:20:27 +02001536 /* use shortened full name if present; we already know that name
1537 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1538 */
1539 if (complete_len) {
1540 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1541
1542 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1543 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1544
1545 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1546 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001547 }
1548
1549 return ad_len;
1550}
1551
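/* Worked example for the helper above, assuming eir_append_data()
 * frames its input as [length][type][payload] with length covering the
 * type byte plus the payload: a complete name "Linux" (strlen 5, plus
 * the NUL byte that complete_len + 1 pulls in) is emitted as
 * 07 09 'L' 'i' 'n' 'u' 'x' 00, where 0x09 is EIR_NAME_COMPLETE.
 */
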
Michał Narajowski1b422062016-10-05 12:28:27 +02001552static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1553{
1554 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1555}
1556
Michał Narajowski7c295c42016-09-18 12:50:02 +02001557static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1558{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001559 u8 scan_rsp_len = 0;
1560
1561 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001562 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001563 }
1564
Michał Narajowski1b422062016-10-05 12:28:27 +02001565 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001566}
1567
Johan Hedbergf2252572015-11-18 12:49:20 +02001568static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1569 u8 *ptr)
1570{
1571 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001572 u32 instance_flags;
1573 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001574
1575 adv_instance = hci_find_adv_instance(hdev, instance);
1576 if (!adv_instance)
1577 return 0;
1578
Michał Narajowski7c295c42016-09-18 12:50:02 +02001579 instance_flags = adv_instance->flags;
1580
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001581 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001582 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001583 }
1584
Michał Narajowski1b422062016-10-05 12:28:27 +02001585 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001586 adv_instance->scan_rsp_len);
1587
Michał Narajowski7c295c42016-09-18 12:50:02 +02001588 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001589
1590 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1591 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1592
1593 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001594}
1595
Johan Hedbergcab054a2015-11-30 11:21:45 +02001596void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001597{
1598 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001599 u8 len;
1600
1601 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1602 return;
1603
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301604 if (ext_adv_capable(hdev)) {
1605 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001606
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301607 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001608
Abhishek Pandit-Subedi6baf8a62020-09-11 15:32:20 -07001609 /* Extended scan response data doesn't allow a response to be
1610 * set if the instance isn't scannable.
1611 */
1612 if (get_adv_instance_scan_rsp_len(hdev, instance))
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301613 len = create_instance_scan_rsp_data(hdev, instance,
1614 cp.data);
1615 else
Abhishek Pandit-Subedi6baf8a62020-09-11 15:32:20 -07001616 len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001617
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301618 if (hdev->scan_rsp_data_len == len &&
1619 !memcmp(cp.data, hdev->scan_rsp_data, len))
1620 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001621
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301622 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1623 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001624
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001625 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301626 cp.length = len;
1627 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1628 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1629
1630 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1631 &cp);
1632 } else {
1633 struct hci_cp_le_set_scan_rsp_data cp;
1634
1635 memset(&cp, 0, sizeof(cp));
1636
1637 if (instance)
1638 len = create_instance_scan_rsp_data(hdev, instance,
1639 cp.data);
1640 else
1641 len = create_default_scan_rsp_data(hdev, cp.data);
1642
1643 if (hdev->scan_rsp_data_len == len &&
1644 !memcmp(cp.data, hdev->scan_rsp_data, len))
1645 return;
1646
1647 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1648 hdev->scan_rsp_data_len = len;
1649
1650 cp.length = len;
1651
1652 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1653 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001654}
1655
Johan Hedbergf2252572015-11-18 12:49:20 +02001656static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1657{
1658 struct adv_info *adv_instance = NULL;
1659 u8 ad_len = 0, flags = 0;
1660 u32 instance_flags;
1661
1662 /* Return 0 when the current instance identifier is invalid. */
1663 if (instance) {
1664 adv_instance = hci_find_adv_instance(hdev, instance);
1665 if (!adv_instance)
1666 return 0;
1667 }
1668
1669 instance_flags = get_adv_instance_flags(hdev, instance);
1670
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001671 /* If the instance already has the flags set, skip adding them
1672 * again.
1673 */
1674 if (adv_instance && eir_get_data(adv_instance->adv_data,
1675 adv_instance->adv_data_len, EIR_FLAGS,
1676 NULL))
1677 goto skip_flags;
1678
Johan Hedbergf2252572015-11-18 12:49:20 +02001679 /* The Add Advertising command allows userspace to set both the general
1680 * and limited discoverable flags.
1681 */
1682 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1683 flags |= LE_AD_GENERAL;
1684
1685 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1686 flags |= LE_AD_LIMITED;
1687
Johan Hedbergf18ba582016-04-06 13:09:05 +03001688 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1689 flags |= LE_AD_NO_BREDR;
1690
Johan Hedbergf2252572015-11-18 12:49:20 +02001691 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1692 /* If a discovery flag wasn't provided, simply use the global
1693 * settings.
1694 */
1695 if (!flags)
1696 flags |= mgmt_get_adv_discov_flags(hdev);
1697
Johan Hedbergf2252572015-11-18 12:49:20 +02001698 /* If flags would still be empty, then there is no need to
1699 * include the "Flags" AD field.
1700 */
1701 if (flags) {
1702 ptr[0] = 0x02;
1703 ptr[1] = EIR_FLAGS;
1704 ptr[2] = flags;
1705
1706 ad_len += 3;
1707 ptr += 3;
1708 }
1709 }
1710
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001711skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001712 if (adv_instance) {
1713 memcpy(ptr, adv_instance->adv_data,
1714 adv_instance->adv_data_len);
1715 ad_len += adv_instance->adv_data_len;
1716 ptr += adv_instance->adv_data_len;
1717 }
1718
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301719 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1720 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001721
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301722 if (ext_adv_capable(hdev)) {
1723 if (adv_instance)
1724 adv_tx_power = adv_instance->tx_power;
1725 else
1726 adv_tx_power = hdev->adv_tx_power;
1727 } else {
1728 adv_tx_power = hdev->adv_tx_power;
1729 }
1730
1731 /* Include the Tx Power field only if we have a valid value for it */
1732 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1733 ptr[0] = 0x02;
1734 ptr[1] = EIR_TX_POWER;
1735 ptr[2] = (u8)adv_tx_power;
1736
1737 ad_len += 3;
1738 ptr += 3;
1739 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001740 }
1741
1742 return ad_len;
1743}
1744
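/* Worked example for the flags block built above: with the general
 * discoverable flag set and BR/EDR disabled, flags ends up as
 * LE_AD_GENERAL | LE_AD_NO_BREDR = 0x06, so the buffer starts with the
 * TLV 02 01 06 (length 2, EIR_FLAGS type 0x01, value 0x06).
 */
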
Johan Hedbergcab054a2015-11-30 11:21:45 +02001745void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001746{
1747 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001748 u8 len;
1749
1750 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1751 return;
1752
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301753 if (ext_adv_capable(hdev)) {
1754 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001755
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301756 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001757
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301758 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001759
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301760 /* There's nothing to do if the data hasn't changed */
1761 if (hdev->adv_data_len == len &&
1762 memcmp(cp.data, hdev->adv_data, len) == 0)
1763 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001764
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301765 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1766 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001767
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301768 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001769 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301770 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1771 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1772
1773 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1774 } else {
1775 struct hci_cp_le_set_adv_data cp;
1776
1777 memset(&cp, 0, sizeof(cp));
1778
1779 len = create_instance_adv_data(hdev, instance, cp.data);
1780
1781 /* There's nothing to do if the data hasn't changed */
1782 if (hdev->adv_data_len == len &&
1783 memcmp(cp.data, hdev->adv_data, len) == 0)
1784 return;
1785
1786 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1787 hdev->adv_data_len = len;
1788
1789 cp.length = len;
1790
1791 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1792 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001793}
1794
Johan Hedbergcab054a2015-11-30 11:21:45 +02001795int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001796{
1797 struct hci_request req;
1798
1799 hci_req_init(&req, hdev);
1800 __hci_req_update_adv_data(&req, instance);
1801
1802 return hci_req_run(&req, NULL);
1803}
1804
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301805static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1806 u16 opcode)
1807{
1808 BT_DBG("%s status %u", hdev->name, status);
1809}
1810
1811void hci_req_disable_address_resolution(struct hci_dev *hdev)
1812{
1813 struct hci_request req;
1814 __u8 enable = 0x00;
1815
1816 if (!use_ll_privacy(hdev) &&
1817 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1818 return;
1819
1820 hci_req_init(&req, hdev);
1821
1822 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1823
1824 hci_req_run(&req, enable_addr_resolution_complete);
1825}
1826
Johan Hedbergf2252572015-11-18 12:49:20 +02001827static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1828{
1829 BT_DBG("%s status %u", hdev->name, status);
1830}
1831
1832void hci_req_reenable_advertising(struct hci_dev *hdev)
1833{
1834 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001835
1836 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001837 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001838 return;
1839
Johan Hedbergf2252572015-11-18 12:49:20 +02001840 hci_req_init(&req, hdev);
1841
Johan Hedbergcab054a2015-11-30 11:21:45 +02001842 if (hdev->cur_adv_instance) {
1843 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1844 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001845 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301846 if (ext_adv_capable(hdev)) {
1847 __hci_req_start_ext_adv(&req, 0x00);
1848 } else {
1849 __hci_req_update_adv_data(&req, 0x00);
1850 __hci_req_update_scan_rsp_data(&req, 0x00);
1851 __hci_req_enable_advertising(&req);
1852 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001853 }
1854
1855 hci_req_run(&req, adv_enable_complete);
1856}
1857
1858static void adv_timeout_expire(struct work_struct *work)
1859{
1860 struct hci_dev *hdev = container_of(work, struct hci_dev,
1861 adv_instance_expire.work);
1862
1863 struct hci_request req;
1864 u8 instance;
1865
1866 BT_DBG("%s", hdev->name);
1867
1868 hci_dev_lock(hdev);
1869
1870 hdev->adv_instance_timeout = 0;
1871
Johan Hedbergcab054a2015-11-30 11:21:45 +02001872 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001873 if (instance == 0x00)
1874 goto unlock;
1875
1876 hci_req_init(&req, hdev);
1877
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001878 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001879
1880 if (list_empty(&hdev->adv_instances))
1881 __hci_req_disable_advertising(&req);
1882
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001883 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001884
1885unlock:
1886 hci_dev_unlock(hdev);
1887}
1888
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301889int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1890 bool use_rpa, struct adv_info *adv_instance,
1891 u8 *own_addr_type, bdaddr_t *rand_addr)
1892{
1893 int err;
1894
1895 bacpy(rand_addr, BDADDR_ANY);
1896
1897 /* If privacy is enabled use a resolvable private address. If
1898 * current RPA has expired then generate a new one.
1899 */
1900 if (use_rpa) {
1901 int to;
1902
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301903 /* If the controller supports LL Privacy, use own address type
1904 * 0x03 (the controller generates the RPA).
1905 */
1906 if (use_ll_privacy(hdev))
1907 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1908 else
1909 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301910
1911 if (adv_instance) {
1912 if (!adv_instance->rpa_expired &&
1913 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1914 return 0;
1915
1916 adv_instance->rpa_expired = false;
1917 } else {
1918 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1919 !bacmp(&hdev->random_addr, &hdev->rpa))
1920 return 0;
1921 }
1922
1923 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1924 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001925 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301926 return err;
1927 }
1928
1929 bacpy(rand_addr, &hdev->rpa);
1930
1931 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1932 if (adv_instance)
1933 queue_delayed_work(hdev->workqueue,
1934 &adv_instance->rpa_expired_cb, to);
1935 else
1936 queue_delayed_work(hdev->workqueue,
1937 &hdev->rpa_expired, to);
1938
1939 return 0;
1940 }
1941
1942 /* In case of required privacy without resolvable private address,
1943 * use a non-resolvable private address. This is useful for
1944 * non-connectable advertising.
1945 */
1946 if (require_privacy) {
1947 bdaddr_t nrpa;
1948
1949 while (true) {
1950 /* The non-resolvable private address is generated
1951 * from random six bytes with the two most significant
1952 * bits cleared.
1953 */
1954 get_random_bytes(&nrpa, 6);
1955 nrpa.b[5] &= 0x3f;
1956
1957 /* The non-resolvable private address shall not be
1958 * equal to the public address.
1959 */
1960 if (bacmp(&hdev->bdaddr, &nrpa))
1961 break;
1962 }
1963
1964 *own_addr_type = ADDR_LE_DEV_RANDOM;
1965 bacpy(rand_addr, &nrpa);
1966
1967 return 0;
1968 }
1969
1970 /* No privacy so use a public address. */
1971 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1972
1973 return 0;
1974}
1975
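/* For reference: the LE random address sub-types are distinguished by
 * the two most significant bits of the address, 00 for non-resolvable,
 * 01 for resolvable and 11 for static random addresses, which is why
 * the code above clears the top two bits of nrpa.b[5].
 */
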
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301976void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1977{
1978 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1979}
1980
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301981int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301982{
1983 struct hci_cp_le_set_ext_adv_params cp;
1984 struct hci_dev *hdev = req->hdev;
1985 bool connectable;
1986 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301987 bdaddr_t random_addr;
1988 u8 own_addr_type;
1989 int err;
1990 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301991 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301992
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301993 if (instance > 0) {
1994 adv_instance = hci_find_adv_instance(hdev, instance);
1995 if (!adv_instance)
1996 return -EINVAL;
1997 } else {
1998 adv_instance = NULL;
1999 }
2000
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302001 flags = get_adv_instance_flags(hdev, instance);
2002
2003 /* If the "connectable" instance flag was not set, then choose between
2004 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2005 */
2006 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2007 mgmt_get_connectable(hdev);
2008
Colin Ian King75edd1f2018-11-09 13:27:36 +00002009 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302010 return -EPERM;
2011
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302012 /* Set require_privacy to true only when non-connectable
2013 * advertising is used. In that case it is fine to use a
2014 * non-resolvable private address.
2015 */
2016 err = hci_get_random_address(hdev, !connectable,
2017 adv_use_rpa(hdev, flags), adv_instance,
2018 &own_addr_type, &random_addr);
2019 if (err < 0)
2020 return err;
2021
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302022 memset(&cp, 0, sizeof(cp));
2023
Alain Michaud5cbd3eb2020-06-22 13:30:28 +00002024 /* In ext adv set param interval is 3 octets */
2025 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2026 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302027
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302028 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2029
2030 if (connectable) {
2031 if (secondary_adv)
2032 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2033 else
2034 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2035 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2036 if (secondary_adv)
2037 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2038 else
2039 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2040 } else {
2041 if (secondary_adv)
2042 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2043 else
2044 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2045 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302046
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302047 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302048 cp.channel_map = hdev->le_adv_channel_map;
2049 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002050 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302051
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302052 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2053 cp.primary_phy = HCI_ADV_PHY_1M;
2054 cp.secondary_phy = HCI_ADV_PHY_2M;
2055 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2056 cp.primary_phy = HCI_ADV_PHY_CODED;
2057 cp.secondary_phy = HCI_ADV_PHY_CODED;
2058 } else {
2059 /* In all other cases use 1M */
2060 cp.primary_phy = HCI_ADV_PHY_1M;
2061 cp.secondary_phy = HCI_ADV_PHY_1M;
2062 }
2063
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302064 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2065
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302066 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2067 bacmp(&random_addr, BDADDR_ANY)) {
2068 struct hci_cp_le_set_adv_set_rand_addr cp;
2069
2070 /* Check if the random address needs to be updated */
2071 if (adv_instance) {
2072 if (!bacmp(&random_addr, &adv_instance->random_addr))
2073 return 0;
2074 } else {
2075 if (!bacmp(&random_addr, &hdev->random_addr))
2076 return 0;
2077 }
2078
2079 memset(&cp, 0, sizeof(cp));
2080
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07002081 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302082 bacpy(&cp.bdaddr, &random_addr);
2083
2084 hci_req_add(req,
2085 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2086 sizeof(cp), &cp);
2087 }
2088
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302089 return 0;
2090}
2091
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002092int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302093{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002094 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302095 struct hci_cp_le_set_ext_adv_enable *cp;
2096 struct hci_cp_ext_adv_set *adv_set;
2097 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002098 struct adv_info *adv_instance;
2099
2100 if (instance > 0) {
2101 adv_instance = hci_find_adv_instance(hdev, instance);
2102 if (!adv_instance)
2103 return -EINVAL;
2104 } else {
2105 adv_instance = NULL;
2106 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302107
2108 cp = (void *) data;
2109 adv_set = (void *) cp->data;
2110
2111 memset(cp, 0, sizeof(*cp));
2112
2113 cp->enable = 0x01;
2114 cp->num_of_sets = 0x01;
2115
2116 memset(adv_set, 0, sizeof(*adv_set));
2117
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002118 adv_set->handle = instance;
2119
2120 /* Set duration per instance since controller is responsible for
2121 * scheduling it.
2122 */
2123 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03002124 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002125
2126 /* Time = N * 10 ms */
2127 adv_set->duration = cpu_to_le16(duration / 10);
2128 }
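	/* For the block above: timeout = 2 s gives duration = 2000 ms and
	 * a duration field of 200 (units of 10 ms).
	 */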
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302129
2130 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2131 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2132 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002133
2134 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302135}
2136
Daniel Winkler37adf702020-07-14 14:16:00 -07002137int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2138{
2139 struct hci_dev *hdev = req->hdev;
2140 struct hci_cp_le_set_ext_adv_enable *cp;
2141 struct hci_cp_ext_adv_set *adv_set;
2142 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2143 u8 req_size;
2144
2145 /* If request specifies an instance that doesn't exist, fail */
2146 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2147 return -EINVAL;
2148
2149 memset(data, 0, sizeof(data));
2150
2151 cp = (void *)data;
2152 adv_set = (void *)cp->data;
2153
2154 /* Instance 0x00 indicates all advertising instances will be disabled */
2155 cp->num_of_sets = !!instance;
2156 cp->enable = 0x00;
2157
2158 adv_set->handle = instance;
2159
2160 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2161 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2162
2163 return 0;
2164}
2165
2166int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2167{
2168 struct hci_dev *hdev = req->hdev;
2169
2170 /* If request specifies an instance that doesn't exist, fail */
2171 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2172 return -EINVAL;
2173
2174 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2175
2176 return 0;
2177}
2178
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302179int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2180{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302181 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07002182 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302183 int err;
2184
Daniel Winkler37adf702020-07-14 14:16:00 -07002185 /* If the instance isn't pending, the controller already knows about
2186 * it, and it's safe to disable it
2187 */
2188 if (adv_instance && !adv_instance->pending)
2189 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302190
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302191 err = __hci_req_setup_ext_adv_instance(req, instance);
2192 if (err < 0)
2193 return err;
2194
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302195 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002196 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302197
2198 return 0;
2199}
2200
Johan Hedbergf2252572015-11-18 12:49:20 +02002201int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2202 bool force)
2203{
2204 struct hci_dev *hdev = req->hdev;
2205 struct adv_info *adv_instance = NULL;
2206 u16 timeout;
2207
2208 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002209 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002210 return -EPERM;
2211
2212 if (hdev->adv_instance_timeout)
2213 return -EBUSY;
2214
2215 adv_instance = hci_find_adv_instance(hdev, instance);
2216 if (!adv_instance)
2217 return -ENOENT;
2218
2219 /* A zero timeout means unlimited advertising. As long as there is
2220 * only one instance, duration should be ignored. We still set a timeout
2221 * in case further instances are being added later on.
2222 *
2223 * If the remaining lifetime of the instance is more than the duration
2224 * then the timeout corresponds to the duration, otherwise it will be
2225 * reduced to the remaining instance lifetime.
2226 */
2227 if (adv_instance->timeout == 0 ||
2228 adv_instance->duration <= adv_instance->remaining_time)
2229 timeout = adv_instance->duration;
2230 else
2231 timeout = adv_instance->remaining_time;
2232
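	/* Worked example: with duration 2 s and remaining_time 5 s, timeout
	 * becomes 2 s here and remaining_time drops to 3 s below.
	 */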
2233 /* The remaining time is being reduced unless the instance is being
2234 * advertised without time limit.
2235 */
2236 if (adv_instance->timeout)
2237 adv_instance->remaining_time =
2238 adv_instance->remaining_time - timeout;
2239
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002240 /* Only use work for scheduling instances with legacy advertising */
2241 if (!ext_adv_capable(hdev)) {
2242 hdev->adv_instance_timeout = timeout;
2243 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002244 &hdev->adv_instance_expire,
2245 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002246 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002247
2248 /* If we're just re-scheduling the same instance again then do not
2249 * execute any HCI commands. This happens when a single instance is
2250 * being advertised.
2251 */
2252 if (!force && hdev->cur_adv_instance == instance &&
2253 hci_dev_test_flag(hdev, HCI_LE_ADV))
2254 return 0;
2255
2256 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302257 if (ext_adv_capable(hdev)) {
2258 __hci_req_start_ext_adv(req, instance);
2259 } else {
2260 __hci_req_update_adv_data(req, instance);
2261 __hci_req_update_scan_rsp_data(req, instance);
2262 __hci_req_enable_advertising(req);
2263 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002264
2265 return 0;
2266}
2267
Johan Hedbergf2252572015-11-18 12:49:20 +02002268/* For a single instance:
2269 * - force == true: The instance will be removed even when its remaining
2270 * lifetime is not zero.
2271 * - force == false: the instance will be deactivated but kept stored unless
2272 * the remaining lifetime is zero.
2273 *
2274 * For instance == 0x00:
2275 * - force == true: All instances will be removed regardless of their timeout
2276 * setting.
2277 * - force == false: Only instances that have a timeout will be removed.
2278 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002279void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2280 struct hci_request *req, u8 instance,
2281 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002282{
2283 struct adv_info *adv_instance, *n, *next_instance = NULL;
2284 int err;
2285 u8 rem_inst;
2286
2287 /* Cancel any timeout concerning the removed instance(s). */
2288 if (!instance || hdev->cur_adv_instance == instance)
2289 cancel_adv_timeout(hdev);
2290
2291 /* Get the next instance to advertise BEFORE we remove
2292 * the current one. This can be the same instance again
2293 * if there is only one instance.
2294 */
2295 if (instance && hdev->cur_adv_instance == instance)
2296 next_instance = hci_get_next_instance(hdev, instance);
2297
2298 if (instance == 0x00) {
2299 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2300 list) {
2301 if (!(force || adv_instance->timeout))
2302 continue;
2303
2304 rem_inst = adv_instance->instance;
2305 err = hci_remove_adv_instance(hdev, rem_inst);
2306 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002307 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002308 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002309 } else {
2310 adv_instance = hci_find_adv_instance(hdev, instance);
2311
2312 if (force || (adv_instance && adv_instance->timeout &&
2313 !adv_instance->remaining_time)) {
2314 /* Don't advertise a removed instance. */
2315 if (next_instance &&
2316 next_instance->instance == instance)
2317 next_instance = NULL;
2318
2319 err = hci_remove_adv_instance(hdev, instance);
2320 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002321 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002322 }
2323 }
2324
Johan Hedbergf2252572015-11-18 12:49:20 +02002325 if (!req || !hdev_is_powered(hdev) ||
2326 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2327 return;
2328
Daniel Winkler37adf702020-07-14 14:16:00 -07002329 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002330 __hci_req_schedule_adv_instance(req, next_instance->instance,
2331 false);
2332}
2333
Johan Hedberg0857dd32014-12-19 13:40:20 +02002334static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2335{
2336 struct hci_dev *hdev = req->hdev;
2337
2338 /* If we're advertising or initiating an LE connection we can't
2339 * go ahead and change the random address at this time. This is
2340 * because the eventual initiator address used for the
2341 * subsequently created connection will be undefined (some
2342 * controllers use the new address and others the one we had
2343 * when the operation started).
2344 *
2345 * In this kind of scenario skip the update and let the random
2346 * address be updated at the next cycle.
2347 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002348 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002349 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002350 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002351 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002352 return;
2353 }
2354
2355 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2356}
2357
2358int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002359 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002360{
2361 struct hci_dev *hdev = req->hdev;
2362 int err;
2363
2364 /* If privacy is enabled use a resolvable private address. If
2365 * current RPA has expired or there is something else than
2366 * the current RPA in use, then generate a new one.
2367 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002368 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002369 int to;
2370
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302371 /* If the controller supports LL Privacy, use own address type
2372 * 0x03 (the controller generates the RPA).
2373 */
2374 if (use_ll_privacy(hdev))
2375 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2376 else
2377 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002378
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002379 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002380 !bacmp(&hdev->random_addr, &hdev->rpa))
2381 return 0;
2382
2383 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2384 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002385 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002386 return err;
2387 }
2388
2389 set_random_addr(req, &hdev->rpa);
2390
2391 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2392 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2393
2394 return 0;
2395 }
2396
2397 /* In case of required privacy without resolvable private address,
2398 * use a non-resolvable private address. This is useful for active
2399 * scanning and non-connectable advertising.
2400 */
2401 if (require_privacy) {
2402 bdaddr_t nrpa;
2403
2404 while (true) {
2405 /* The non-resolvable private address is generated
2406 * from random six bytes with the two most significant
2407 * bits cleared.
2408 */
2409 get_random_bytes(&nrpa, 6);
2410 nrpa.b[5] &= 0x3f;
2411
2412 /* The non-resolvable private address shall not be
2413 * equal to the public address.
2414 */
2415 if (bacmp(&hdev->bdaddr, &nrpa))
2416 break;
2417 }
2418
2419 *own_addr_type = ADDR_LE_DEV_RANDOM;
2420 set_random_addr(req, &nrpa);
2421 return 0;
2422 }
2423
2424 /* If forcing static address is in use or there is no public
2425 * address use the static address as random address (but skip
2426 * the HCI command if the current random address is already the
2427 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002428 *
2429 * In case BR/EDR has been disabled on a dual-mode controller
2430 * and a static address has been configured, then use that
2431 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002432 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002433 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002434 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002435 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002436 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002437 *own_addr_type = ADDR_LE_DEV_RANDOM;
2438 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2439 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2440 &hdev->static_addr);
2441 return 0;
2442 }
2443
2444 /* Neither privacy nor static address is being used so use a
2445 * public address.
2446 */
2447 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2448
2449 return 0;
2450}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002451
Johan Hedberg405a2612014-12-19 23:18:22 +02002452static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2453{
2454 struct bdaddr_list *b;
2455
2456 list_for_each_entry(b, &hdev->whitelist, list) {
2457 struct hci_conn *conn;
2458
2459 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2460 if (!conn)
2461 return true;
2462
2463 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2464 return true;
2465 }
2466
2467 return false;
2468}
2469
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002470void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002471{
2472 struct hci_dev *hdev = req->hdev;
2473 u8 scan;
2474
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002475 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002476 return;
2477
2478 if (!hdev_is_powered(hdev))
2479 return;
2480
2481 if (mgmt_powering_down(hdev))
2482 return;
2483
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002484 if (hdev->scanning_paused)
2485 return;
2486
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002487 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002488 disconnected_whitelist_entries(hdev))
2489 scan = SCAN_PAGE;
2490 else
2491 scan = SCAN_DISABLED;
2492
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002493 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002494 scan |= SCAN_INQUIRY;
2495
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002496 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2497 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2498 return;
2499
Johan Hedberg405a2612014-12-19 23:18:22 +02002500 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2501}
2502
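/* For reference: SCAN_PAGE enables page scan, making the device
 * connectable over BR/EDR, while SCAN_INQUIRY enables inquiry scan,
 * making it discoverable.
 */
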
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002503static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002504{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002505 hci_dev_lock(req->hdev);
2506 __hci_req_update_scan(req);
2507 hci_dev_unlock(req->hdev);
2508 return 0;
2509}
Johan Hedberg405a2612014-12-19 23:18:22 +02002510
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002511static void scan_update_work(struct work_struct *work)
2512{
2513 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2514
2515 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002516}
2517
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002518static int connectable_update(struct hci_request *req, unsigned long opt)
2519{
2520 struct hci_dev *hdev = req->hdev;
2521
2522 hci_dev_lock(hdev);
2523
2524 __hci_req_update_scan(req);
2525
2526 /* If BR/EDR is not enabled and we disable advertising as a
2527 * by-product of disabling connectable, we need to update the
2528 * advertising flags.
2529 */
2530 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002531 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002532
2533 /* Update the advertising parameters if necessary */
2534 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302535 !list_empty(&hdev->adv_instances)) {
2536 if (ext_adv_capable(hdev))
2537 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2538 else
2539 __hci_req_enable_advertising(req);
2540 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002541
2542 __hci_update_background_scan(req);
2543
2544 hci_dev_unlock(hdev);
2545
2546 return 0;
2547}
2548
2549static void connectable_update_work(struct work_struct *work)
2550{
2551 struct hci_dev *hdev = container_of(work, struct hci_dev,
2552 connectable_update);
2553 u8 status;
2554
2555 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2556 mgmt_set_connectable_complete(hdev, status);
2557}
2558
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002559static u8 get_service_classes(struct hci_dev *hdev)
2560{
2561 struct bt_uuid *uuid;
2562 u8 val = 0;
2563
2564 list_for_each_entry(uuid, &hdev->uuids, list)
2565 val |= uuid->svc_hint;
2566
2567 return val;
2568}
2569
2570void __hci_req_update_class(struct hci_request *req)
2571{
2572 struct hci_dev *hdev = req->hdev;
2573 u8 cod[3];
2574
2575 BT_DBG("%s", hdev->name);
2576
2577 if (!hdev_is_powered(hdev))
2578 return;
2579
2580 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2581 return;
2582
2583 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2584 return;
2585
2586 cod[0] = hdev->minor_class;
2587 cod[1] = hdev->major_class;
2588 cod[2] = get_service_classes(hdev);
2589
2590 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2591 cod[1] |= 0x20;
2592
2593 if (memcmp(cod, hdev->dev_class, 3) == 0)
2594 return;
2595
2596 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2597}
2598
Johan Hedbergaed1a882015-11-22 17:24:44 +03002599static void write_iac(struct hci_request *req)
2600{
2601 struct hci_dev *hdev = req->hdev;
2602 struct hci_cp_write_current_iac_lap cp;
2603
2604 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2605 return;
2606
2607 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2608 /* Limited discoverable mode */
2609 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2610 cp.iac_lap[0] = 0x00; /* LIAC */
2611 cp.iac_lap[1] = 0x8b;
2612 cp.iac_lap[2] = 0x9e;
2613 cp.iac_lap[3] = 0x33; /* GIAC */
2614 cp.iac_lap[4] = 0x8b;
2615 cp.iac_lap[5] = 0x9e;
2616 } else {
2617 /* General discoverable mode */
2618 cp.num_iac = 1;
2619 cp.iac_lap[0] = 0x33; /* GIAC */
2620 cp.iac_lap[1] = 0x8b;
2621 cp.iac_lap[2] = 0x9e;
2622 }
2623
2624 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2625 (cp.num_iac * 3) + 1, &cp);
2626}
2627
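/* For reference: the iac_lap triplets above are the little-endian
 * encodings of the Limited Inquiry Access Code 0x9E8B00 and the
 * General Inquiry Access Code 0x9E8B33.
 */
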
2628static int discoverable_update(struct hci_request *req, unsigned long opt)
2629{
2630 struct hci_dev *hdev = req->hdev;
2631
2632 hci_dev_lock(hdev);
2633
2634 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2635 write_iac(req);
2636 __hci_req_update_scan(req);
2637 __hci_req_update_class(req);
2638 }
2639
2640 /* Advertising instances don't use the global discoverable setting, so
2641 * only update AD if advertising was enabled using Set Advertising.
2642 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002643 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002644 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002645
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002646 /* Discoverable mode affects the local advertising
2647 * address in limited privacy mode.
2648 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302649 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2650 if (ext_adv_capable(hdev))
2651 __hci_req_start_ext_adv(req, 0x00);
2652 else
2653 __hci_req_enable_advertising(req);
2654 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002655 }
2656
Johan Hedbergaed1a882015-11-22 17:24:44 +03002657 hci_dev_unlock(hdev);
2658
2659 return 0;
2660}
2661
2662static void discoverable_update_work(struct work_struct *work)
2663{
2664 struct hci_dev *hdev = container_of(work, struct hci_dev,
2665 discoverable_update);
2666 u8 status;
2667
2668 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2669 mgmt_set_discoverable_complete(hdev, status);
2670}
2671
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002672void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2673 u8 reason)
2674{
2675 switch (conn->state) {
2676 case BT_CONNECTED:
2677 case BT_CONFIG:
2678 if (conn->type == AMP_LINK) {
2679 struct hci_cp_disconn_phy_link cp;
2680
2681 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2682 cp.reason = reason;
2683 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2684 &cp);
2685 } else {
2686 struct hci_cp_disconnect dc;
2687
2688 dc.handle = cpu_to_le16(conn->handle);
2689 dc.reason = reason;
2690 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2691 }
2692
2693 conn->state = BT_DISCONN;
2694
2695 break;
2696 case BT_CONNECT:
2697 if (conn->type == LE_LINK) {
2698 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2699 break;
2700 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2701 0, NULL);
2702 } else if (conn->type == ACL_LINK) {
2703 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2704 break;
2705 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2706 6, &conn->dst);
2707 }
2708 break;
2709 case BT_CONNECT2:
2710 if (conn->type == ACL_LINK) {
2711 struct hci_cp_reject_conn_req rej;
2712
2713 bacpy(&rej.bdaddr, &conn->dst);
2714 rej.reason = reason;
2715
2716 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2717 sizeof(rej), &rej);
2718 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2719 struct hci_cp_reject_sync_conn_req rej;
2720
2721 bacpy(&rej.bdaddr, &conn->dst);
2722
2723 /* SCO rejection has its own limited set of
2724 * allowed error values (0x0D-0x0F) which isn't
2725 * compatible with most values passed to this
2726 * function. To be safe hard-code one of the
2727 * values that's suitable for SCO.
2728 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002729 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002730
2731 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2732 sizeof(rej), &rej);
2733 }
2734 break;
2735 default:
2736 conn->state = BT_CLOSED;
2737 break;
2738 }
2739}
2740
2741static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2742{
2743 if (status)
2744 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2745}
2746
2747int hci_abort_conn(struct hci_conn *conn, u8 reason)
2748{
2749 struct hci_request req;
2750 int err;
2751
2752 hci_req_init(&req, conn->hdev);
2753
2754 __hci_abort_conn(&req, conn, reason);
2755
2756 err = hci_req_run(&req, abort_conn_complete);
2757 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002758 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002759 return err;
2760 }
2761
2762 return 0;
2763}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002764
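/* A typical call, as a sketch; HCI_ERROR_REMOTE_USER_TERM is one of
 * the disconnect reason codes defined in the HCI header:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */
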
Johan Hedberga1d01db2015-11-11 08:11:25 +02002765static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002766{
2767 hci_dev_lock(req->hdev);
2768 __hci_update_background_scan(req);
2769 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002770 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002771}
2772
2773static void bg_scan_update(struct work_struct *work)
2774{
2775 struct hci_dev *hdev = container_of(work, struct hci_dev,
2776 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002777 struct hci_conn *conn;
2778 u8 status;
2779 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002780
Johan Hedberg84235d22015-11-11 08:11:20 +02002781 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2782 if (!err)
2783 return;
2784
2785 hci_dev_lock(hdev);
2786
2787 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2788 if (conn)
2789 hci_le_conn_failed(conn, status);
2790
2791 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002792}
2793
Johan Hedberga1d01db2015-11-11 08:11:25 +02002794static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002795{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302796 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002797 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002798}
2799
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002800static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2801{
2802 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002803 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2804 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002805 struct hci_cp_inquiry cp;
2806
2807 BT_DBG("%s", req->hdev->name);
2808
2809 hci_dev_lock(req->hdev);
2810 hci_inquiry_cache_flush(req->hdev);
2811 hci_dev_unlock(req->hdev);
2812
2813 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002814
2815 if (req->hdev->discovery.limited)
2816 memcpy(&cp.lap, liac, sizeof(cp.lap));
2817 else
2818 memcpy(&cp.lap, giac, sizeof(cp.lap));
2819
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002820 cp.length = length;
2821
2822 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2823
2824 return 0;
2825}
2826
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002827static void le_scan_disable_work(struct work_struct *work)
2828{
2829 struct hci_dev *hdev = container_of(work, struct hci_dev,
2830 le_scan_disable.work);
2831 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002832
2833 BT_DBG("%s", hdev->name);
2834
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002835 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002836 return;
2837
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002838 cancel_delayed_work(&hdev->le_scan_restart);
2839
2840 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2841 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002842 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2843 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002844 return;
2845 }
2846
2847 hdev->discovery.scan_start = 0;
2848
2849 /* If we were running LE only scan, change discovery state. If
2850 * we were running both LE and BR/EDR inquiry simultaneously,
2851 * and BR/EDR inquiry is already finished, stop discovery,
2852 * otherwise BR/EDR inquiry will stop discovery when finished.
2853 * If we are about to resolve a remote device name, do not change
2854 * the discovery state.
2855 */
2856
2857 if (hdev->discovery.type == DISCOV_TYPE_LE)
2858 goto discov_stopped;
2859
2860 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2861 return;
2862
2863 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2864 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2865 hdev->discovery.state != DISCOVERY_RESOLVING)
2866 goto discov_stopped;
2867
2868 return;
2869 }
2870
2871 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2872 HCI_CMD_TIMEOUT, &status);
2873 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002874 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002875 goto discov_stopped;
2876 }
2877
2878 return;
2879
2880discov_stopped:
2881 hci_dev_lock(hdev);
2882 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2883 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002884}
2885
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002886static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002887{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002888 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002889
2890 /* If controller is not scanning we are done. */
2891 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2892 return 0;
2893
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002894 if (hdev->scanning_paused) {
2895 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2896 return 0;
2897 }
2898
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302899 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002900
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302901 if (use_ext_scan(hdev)) {
2902 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2903
2904 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2905 ext_enable_cp.enable = LE_SCAN_ENABLE;
2906 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2907
2908 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2909 sizeof(ext_enable_cp), &ext_enable_cp);
2910 } else {
2911 struct hci_cp_le_set_scan_enable cp;
2912
2913 memset(&cp, 0, sizeof(cp));
2914 cp.enable = LE_SCAN_ENABLE;
2915 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2916 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2917 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002918
2919 return 0;
2920}
2921
2922static void le_scan_restart_work(struct work_struct *work)
2923{
2924 struct hci_dev *hdev = container_of(work, struct hci_dev,
2925 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002926 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002927 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002928
2929 BT_DBG("%s", hdev->name);
2930
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002931 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002932 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002933 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2934 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002935 return;
2936 }
2937
2938 hci_dev_lock(hdev);
2939
2940 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2941 !hdev->discovery.scan_start)
2942 goto unlock;
2943
2944 /* When the scan was started, the le_scan_disable work was queued
2945 * to run at scan_start + duration. That work was canceled during
2946 * the restart, so queue it again with the proper remaining timeout
2947 * to make sure that the scan does not run indefinitely.
2948 */
2949 duration = hdev->discovery.scan_duration;
2950 scan_start = hdev->discovery.scan_start;
2951 now = jiffies;
2952 if (now - scan_start <= duration) {
2953 int elapsed;
2954
2955 if (now >= scan_start)
2956 elapsed = now - scan_start;
2957 else
2958 elapsed = ULONG_MAX - scan_start + now;
2959
2960 timeout = duration - elapsed;
2961 } else {
2962 timeout = 0;
2963 }
2964
2965 queue_delayed_work(hdev->req_workqueue,
2966 &hdev->le_scan_disable, timeout);
2967
2968unlock:
2969 hci_dev_unlock(hdev);
2970}
2971
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* White list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	BT_DBG("%s", hdev->name);

	/* If the controller is scanning, background scanning is running.
	 * Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
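	/* If no private address could be programmed, fall back to the
	 * public address rather than failing the whole discovery request;
	 * scanning still works, only at the cost of privacy.
	 */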
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, addr_resolv);
	return 0;
}

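/* Despite its name, this request serves the simultaneous discovery
 * case: the LE active scan and the BR/EDR inquiry are queued in a
 * single request, and a controller flagged with
 * HCI_QUIRK_SIMULTANEOUS_DISCOVERY is expected to schedule both on
 * its own.
 */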
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval to leave the controller some time
			 * for BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

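		/* Without controller-side scheduling the two phases run
		 * sequentially: the LE scan is started here, and once the
		 * le_scan_disable work fires after the interleaved timeout
		 * it is expected to kick off the BR/EDR inquiry phase (see
		 * the scan disable handling earlier in this file).
		 */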
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

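/* Request-building callback for hci_req_sync(). It takes hdev->lock
 * around hci_req_stop_discovery(), which may walk the inquiry cache
 * when a pending remote name resolution has to be cancelled.
 */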
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

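/* Bring the controller in line with the host settings after power on:
 * SSP/SC support, LE host support, default advertising state, link
 * security, connectability, class, name and EIR data are all
 * (re-)programmed in one synchronous request below.
 */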
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

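		/* 'le' enables LE support on a BR/EDR capable host; 'simul'
		 * (simultaneous LE and BR/EDR to the same device) is a
		 * legacy feature and is always left disabled here.
		 */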
		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

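/* Presumably invoked from the power-on path (hci_power_on() work in
 * hci_core.c) once the controller init sequence has completed.
 */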
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

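/* Every work item initialized in hci_request_setup() must be cancelled
 * here. adv_instance_expire is only cancelled when a timeout is
 * actually pending, mirroring how it is armed on demand.
 */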
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}