/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

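/* Initialize a request: bind it to the controller, start with an empty
 * command queue and clear any previous error.
 */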
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

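/* Submit all queued commands: the completion callback is attached to the
 * last command so it only fires once the whole request has completed, then
 * the commands are spliced onto the device's command queue and the command
 * worker is scheduled.
 */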
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

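/* Completion handler shared by all synchronous requests: record the result,
 * keep a reference to the response skb (if any) and wake up the waiter.
 */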
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

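/* Send a single HCI command and wait (up to @timeout) for its completion,
 * optionally matching on a specific @event. Returns the response skb on
 * success, an ERR_PTR otherwise.
 */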
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

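/* Allocate and fill an skb with the HCI command header and @plen bytes of
 * parameters; the skb is ready to be queued on a request or sent directly.
 */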
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

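/* Queue page scan activity and type updates: interlaced scanning with a
 * 160 msec interval when fast connectable is enabled, the controller
 * defaults otherwise. Commands are only queued for values that actually
 * change.
 */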
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices
		 * to be scanned for and no ADV monitors, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

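/* Queue the commands to stop LE scanning (extended or legacy, depending on
 * controller support) and, when LL privacy is in use and no RPA-based LE
 * connection is being established (@rpa_le_conn), to disable
 * controller-based address resolution as well.
 */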
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

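/* Queue removal of a device from the controller white list and, when LL
 * privacy is in use and an IRK is known for the device, from the resolving
 * list as well.
 */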
static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list cannot be used with RPAs */
	if (!allow_rpa && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list cannot be used with RPAs */
		if (!allow_rpa && !use_ll_privacy(hdev) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Once the controller offloading of advertisement monitor is in place,
	 * the if condition should include the support of MSFT extension
	 * support. If suspend is ongoing, whitelist should be the default to
	 * prevent waking by random advertisements.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

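/* Queue the scan parameter and scan enable commands, using the extended
 * (per-PHY) variants when the controller supports extended scanning and
 * the legacy commands otherwise.
 */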
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution; only then can the resolving list be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

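/* Queue a command to clear all controller event filters and restore the
 * page scan state that setting the filters may have modified.
 */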
static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

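/* Reconfigure LE passive scanning: disable scanning if it is currently
 * enabled, re-enable it with parameters matching the current state and
 * flag the suspend notifier to wait for the scan enable response.
 */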
static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void hci_suspend_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Suspending advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void hci_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			hci_suspend_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			hci_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

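/* Decide whether advertising is currently allowed by checking the supported
 * LE states bitmask against the existing master/slave role connections.
 */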
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

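/* Queue the legacy LE advertising parameter and enable commands, picking
 * the advertising type, intervals and own address type from the current
 * instance flags and the global connectable/discoverable settings.
 */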
Johan Hedbergf2252572015-11-18 12:49:20 +02001428void __hci_req_enable_advertising(struct hci_request *req)
1429{
1430 struct hci_dev *hdev = req->hdev;
1431 struct hci_cp_le_set_adv_param cp;
1432 u8 own_addr_type, enable = 0x01;
1433 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301434 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001435 u32 flags;
1436
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001437 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1438
1439 /* If the "connectable" instance flag was not set, then choose between
1440 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1441 */
1442 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1443 mgmt_get_connectable(hdev);
1444
1445 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001446 return;
1447
1448 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1449 __hci_req_disable_advertising(req);
1450
1451 /* Clear the HCI_LE_ADV bit temporarily so that the
1452 * hci_update_random_address knows that it's safe to go ahead
1453 * and write a new random address. The flag will be set back on
1454 * as soon as the SET_ADV_ENABLE HCI command completes.
1455 */
1456 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1457
Johan Hedbergf2252572015-11-18 12:49:20 +02001458 /* Set require_privacy to true only when non-connectable
1459 * advertising is used. In that case it is fine to use a
1460 * non-resolvable private address.
1461 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001462 if (hci_update_random_address(req, !connectable,
1463 adv_use_rpa(hdev, flags),
1464 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001465 return;
1466
1467 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001468
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301469 if (connectable) {
Johan Hedbergf2252572015-11-18 12:49:20 +02001470 cp.type = LE_ADV_IND;
Johan Hedbergf2252572015-11-18 12:49:20 +02001471
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301472 adv_min_interval = hdev->le_adv_min_interval;
1473 adv_max_interval = hdev->le_adv_max_interval;
1474 } else {
1475 if (get_cur_adv_instance_scan_rsp_len(hdev))
1476 cp.type = LE_ADV_SCAN_IND;
1477 else
1478 cp.type = LE_ADV_NONCONN_IND;
1479
1480 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1481 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1482 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1483 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1484 } else {
1485 adv_min_interval = hdev->le_adv_min_interval;
1486 adv_max_interval = hdev->le_adv_max_interval;
1487 }
1488 }
1489
1490 cp.min_interval = cpu_to_le16(adv_min_interval);
1491 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001492 cp.own_address_type = own_addr_type;
1493 cp.channel_map = hdev->le_adv_channel_map;
1494
1495 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1496
1497 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1498}
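
/* Illustrative usage sketch (not part of the original file): helpers like
 * __hci_req_enable_advertising() only queue HCI commands; a caller builds
 * a request around them and runs it synchronously. The callback name below
 * is hypothetical:
 *
 *	static int enable_adv_sketch(struct hci_request *req,
 *				     unsigned long opt)
 *	{
 *		hci_dev_lock(req->hdev);
 *		__hci_req_enable_advertising(req);
 *		hci_dev_unlock(req->hdev);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, enable_adv_sketch, 0, HCI_CMD_TIMEOUT, NULL);
 */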

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
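
/* Worked example (illustrative, assuming the eir_append_data() helper lays
 * fields out as length, type, data per the Core spec EIR/AD format): for
 * dev_name "abc" the complete-name branch above emits
 * 0x05 0x09 'a' 'b' 'c' 0x00 — a length byte covering type plus data,
 * EIR_NAME_COMPLETE (0x09), and the name including the NUL that the
 * complete_len + 1 accounts for.
 */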

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		/* Extended scan response data doesn't allow a response to be
		 * set if the instance isn't scannable.
		 */
		if (get_adv_instance_scan_rsp_len(hdev, instance))
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = 0;

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = instance;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If the instance already has the flags set, skip adding them
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
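
/* Worked example (illustrative): for a general-discoverable, LE-only
 * controller the Flags field built above is the three bytes
 * 0x02 0x01 0x06 — length 2, type EIR_FLAGS (0x01), and the value
 * LE_AD_GENERAL | LE_AD_NO_BREDR (0x02 | 0x04).
 */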

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = instance;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!use_ll_privacy(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
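
/* Worked example (illustrative): bdaddr_t stores the address little-endian,
 * so b[5] is the most significant byte. Masking with 0x3f clears the top
 * two bits — e.g. a random MSB of 0xd7 (1101 0111) becomes 0x17
 * (0001 0111) — which marks the address as a non-resolvable private
 * address per the Core spec.
 */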

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	/* In the extended adv set params the interval is 3 octets */
	hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
	hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if the random address needs to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = instance;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}
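
/* Worked example (illustrative): the controller takes the duration in
 * units of 10 ms, so an instance timeout of 5 seconds becomes
 * 5 * MSEC_PER_SEC = 5000 ms and adv_set->duration = 5000 / 10 = 500.
 */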

int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	u8 req_size;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	adv_set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	adv_set->handle = instance;

	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);

	return 0;
}

int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);

	return 0;
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
	int err;

	/* If the instance isn't pending, the controller already knows about
	 * it and it's safe to disable.
	 */
	if (adv_instance && !adv_instance->pending)
		__hci_req_disable_ext_adv_instance(req, instance);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
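
/* Worked example (illustrative): with duration = 2 s, timeout = 10 s and
 * remaining_time = 5 s, the instance is scheduled for min(2, 5) = 2 s and
 * remaining_time drops to 3 s; once the remaining time is exhausted the
 * instance becomes eligible for removal in hci_req_clear_adv_instance()
 * below.
 */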

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance && !ext_adv_capable(hdev))
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hdev->scanning_paused)
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
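
/* Note (illustrative): update_scan() and scan_update_work() show the
 * recurring pattern in this file — a work item calls hci_req_sync() with a
 * small request-builder callback that queues HCI commands under the dev
 * lock, and hci_req_sync() runs the request and waits for completion. The
 * connectable and discoverable update workers below follow the same shape.
 */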

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
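
/* Note (illustrative): the Class of Device is sent as a 24-bit value
 * little-endian, so cod[1] holds bits 8-15. Setting 0x20 there raises
 * bit 13, the "Limited Discoverable Mode" service class bit from the
 * Bluetooth Assigned Numbers.
 */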

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
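
/* Note (illustrative): the inquiry access codes are 24-bit LAPs written
 * little-endian, so the bytes 0x00, 0x8b, 0x9e encode the LIAC 0x9e8b00
 * and 0x33, 0x8b, 0x9e encode the GIAC 0x9e8b33.
 */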

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}
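
/* Illustrative usage sketch (not part of the original file): a caller
 * tearing down a link typically passes a spec-defined reason code, e.g.
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * and __hci_abort_conn() picks the right HCI command for the current
 * connection state.
 */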

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req, false);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_req_add_le_scan_disable(req, false);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
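
/* Worked example (illustrative): with HZ = 1000, a scan_duration of
 * 10240 jiffies (10.24 s) and 4000 jiffies elapsed since scan_start
 * leaves timeout = 10240 - 4000 = 6240 jiffies, so le_scan_disable is
 * re-queued for the remaining 6.24 s. The ULONG_MAX branch keeps the
 * elapsed time correct if jiffies wrapped around in between.
 */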

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* White list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, addr_resolv);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
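
/* Summary of the timeout selection above:
 *
 *	DISCOV_TYPE_BREDR:	inquiry only, no LE scan timeout armed
 *	DISCOV_TYPE_INTERLEAVED (simultaneous quirk):
 *				DISCOV_LE_TIMEOUT, doubled LE scan interval
 *	DISCOV_TYPE_INTERLEAVED:
 *				hdev->discov_interleaved_timeout
 *	DISCOV_TYPE_LE:		DISCOV_LE_TIMEOUT
 *
 * In every LE case the le_scan_disable work queued at the end bounds
 * the scan duration.
 */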

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
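
/* The boolean returned by hci_req_stop_discovery() indicates whether
 * any HCI commands were actually queued on the request, i.e. whether
 * there is anything to run and wait for before discovery can be
 * considered fully stopped.
 */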

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
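
/* State transitions driven by discov_update():
 *
 *	DISCOVERY_STARTING -> DISCOVERY_FINDING	(start succeeded)
 *	DISCOVERY_STARTING -> DISCOVERY_STOPPED	(start failed)
 *	DISCOVERY_STOPPING -> DISCOVERY_STOPPED	(stop succeeded)
 *
 * Any other state is left untouched.
 */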

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
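
/* Illustrative sketch (not part of this file) of how the timeout above
 * is typically armed: the mgmt code stores the timeout in seconds in
 * hdev->discov_timeout and queues the delayed work, roughly:
 *
 *	queue_delayed_work(hdev->req_workqueue, &hdev->discov_off,
 *			   msecs_to_jiffies(hdev->discov_timeout * 1000));
 *
 * The exact call site lives in mgmt.c and may differ in detail.
 */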

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
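
/* hci_request_setup() and hci_request_cancel_all() are intended to be
 * used as a pair by the core: setup when a controller is allocated,
 * cancel_all when it is being closed or torn down, so that none of the
 * work items above can run against a dead hdev. (The exact call sites
 * live in hci_core.c.)
 */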

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}