Johan Hedberg0857dd32014-12-19 13:40:20 +02001/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
Ingo Molnar174cd4b2017-02-02 19:15:33 +010024#include <linux/sched/signal.h>
25
Johan Hedberg0857dd32014-12-19 13:40:20 +020026#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
Johan Hedbergf2252572015-11-18 12:49:20 +020028#include <net/bluetooth/mgmt.h>
Johan Hedberg0857dd32014-12-19 13:40:20 +020029
30#include "smp.h"
31#include "hci_request.h"
32
Johan Hedbergbe91cd02015-11-10 09:44:54 +020033#define HCI_REQ_DONE 0
34#define HCI_REQ_PEND 1
35#define HCI_REQ_CANCELED 2
36
Johan Hedberg0857dd32014-12-19 13:40:20 +020037void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
Jaganath Kanakkasseryf17d8582017-10-25 10:58:48 +053044void hci_req_purge(struct hci_request *req)
45{
46 skb_queue_purge(&req->cmd_q);
47}
48
João Paulo Rechi Vitaf80c5da2019-05-02 10:01:52 +080049bool hci_req_status_pend(struct hci_dev *hdev)
50{
51 return hdev->req_status == HCI_REQ_PEND;
52}
53
Johan Hedberge62144872015-04-02 13:41:08 +030054static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
Johan Hedberg0857dd32014-12-19 13:40:20 +020056{
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
Johan Hedberg44d27132015-11-05 09:31:40 +020076 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
Johan Hedberg0857dd32014-12-19 13:40:20 +020082
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90}
91
Johan Hedberge62144872015-04-02 13:41:08 +030092int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93{
94 return req_run(req, complete, NULL);
95}
96
97int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98{
99 return req_run(req, NULL, complete);
100}
101
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200102static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104{
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114}
115
Johan Hedbergb5044302015-11-10 09:44:55 +0200116void hci_req_sync_cancel(struct hci_dev *hdev, int err)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200117{
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125}
126
127struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129{
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200142 err = hci_req_run_skb(&req, hci_req_sync_complete);
John Keeping67d8cee2018-04-19 16:29:37 +0100143 if (err < 0)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200144 return ERR_PTR(err);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200145
John Keeping67d8cee2018-04-19 16:29:37 +0100146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200148
John Keeping67d8cee2018-04-19 16:29:37 +0100149 if (err == -ERESTARTSYS)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181}
182EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186{
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188}
189EXPORT_SYMBOL(__hci_cmd_sync);
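/* Editor's note -- illustrative sketch, not part of the original file.
 * __hci_cmd_sync() sends a single command and blocks for the matching
 * completion event, returning the event skb. Reading the local version
 * information could, for example, look like this (callers normally hold
 * the request lock taken via hci_req_sync_lock()):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data now holds struct hci_rp_read_local_version
 *	kfree_skb(skb);
 */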
190
191/* Execute request and wait for completion. */
Johan Hedberga1d01db2015-11-11 08:11:25 +0200192int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200194 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200195{
196 struct hci_request req;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
Johan Hedberga1d01db2015-11-11 08:11:25 +0200205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200211
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
220 */
Johan Hedberg568f44f2015-11-23 14:40:47 +0200221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200224 return 0;
Johan Hedberg568f44f2015-11-23 14:40:47 +0200225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200229
230 return err;
231 }
232
John Keeping67d8cee2018-04-19 16:29:37 +0100233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200235
John Keeping67d8cee2018-04-19 16:29:37 +0100236 if (err == -ERESTARTSYS)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200242 if (hci_status)
243 *hci_status = hdev->req_result;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200250 break;
251
252 default:
253 err = -ETIMEDOUT;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200256 break;
257 }
258
Frederic Dalleau9afee942016-08-23 07:59:19 +0200259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266}
267
Johan Hedberga1d01db2015-11-11 08:11:25 +0200268int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200270 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200271{
272 int ret;
273
274 if (!test_bit(HCI_UP, &hdev->flags))
275 return -ENETDOWN;
276
277 /* Serialize all requests */
Johan Hedbergb5044302015-11-10 09:44:55 +0200278 hci_req_sync_lock(hdev);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200279 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
Johan Hedbergb5044302015-11-10 09:44:55 +0200280 hci_req_sync_unlock(hdev);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200281
282 return ret;
283}
284
Johan Hedberg0857dd32014-12-19 13:40:20 +0200285struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
286 const void *param)
287{
288 int len = HCI_COMMAND_HDR_SIZE + plen;
289 struct hci_command_hdr *hdr;
290 struct sk_buff *skb;
291
292 skb = bt_skb_alloc(len, GFP_ATOMIC);
293 if (!skb)
294 return NULL;
295
Johannes Berg4df864c2017-06-16 14:29:21 +0200296 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200297 hdr->opcode = cpu_to_le16(opcode);
298 hdr->plen = plen;
299
300 if (plen)
Johannes Berg59ae1d12017-06-16 14:29:20 +0200301 skb_put_data(skb, param, plen);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200302
303 BT_DBG("skb len %d", skb->len);
304
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100305 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 hci_skb_opcode(skb) = opcode;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200307
308 return skb;
309}
310
311/* Queue a command to an asynchronous HCI request */
312void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 const void *param, u8 event)
314{
315 struct hci_dev *hdev = req->hdev;
316 struct sk_buff *skb;
317
318 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
319
320 /* If an error occurred during request building, there is no point in
321 * queueing the HCI command. We can simply return.
322 */
323 if (req->err)
324 return;
325
326 skb = hci_prepare_cmd(hdev, opcode, plen, param);
327 if (!skb) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +0100328 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329 opcode);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200330 req->err = -ENOMEM;
331 return;
332 }
333
334 if (skb_queue_empty(&req->cmd_q))
Johan Hedberg44d27132015-11-05 09:31:40 +0200335 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200336
Marcel Holtmann242c0eb2015-10-25 22:45:53 +0100337 bt_cb(skb)->hci.req_event = event;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200338
339 skb_queue_tail(&req->cmd_q, skb);
340}
341
342void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
343 const void *param)
344{
345 hci_req_add_ev(req, opcode, plen, param, 0);
346}
347
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200348void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
349{
350 struct hci_dev *hdev = req->hdev;
351 struct hci_cp_write_page_scan_activity acp;
352 u8 type;
353
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
355 return;
356
357 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
358 return;
359
360 if (enable) {
361 type = PAGE_SCAN_TYPE_INTERLACED;
362
363 /* 160 msec page scan interval */
364 acp.interval = cpu_to_le16(0x0100);
365 } else {
Alain Michaud10873f92020-06-11 02:01:56 +0000366 type = hdev->def_page_scan_type;
367 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200368 }
369
Alain Michaud10873f92020-06-11 02:01:56 +0000370 acp.window = cpu_to_le16(hdev->def_page_scan_window);
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200371
372 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
373 __cpu_to_le16(hdev->page_scan_window) != acp.window)
374 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
375 sizeof(acp), &acp);
376
377 if (hdev->page_scan_type != type)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
379}
380
Johan Hedberg196a5e92015-11-22 18:55:44 +0200381/* This function controls the background scanning based on hdev->pend_le_conns
382 * list. If there are pending LE connection we start the background scanning,
383 * otherwise we stop it.
384 *
385 * This function requires the caller holds hdev->lock.
386 */
387static void __hci_update_background_scan(struct hci_request *req)
388{
389 struct hci_dev *hdev = req->hdev;
390
391 if (!test_bit(HCI_UP, &hdev->flags) ||
392 test_bit(HCI_INIT, &hdev->flags) ||
393 hci_dev_test_flag(hdev, HCI_SETUP) ||
394 hci_dev_test_flag(hdev, HCI_CONFIG) ||
395 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
396 hci_dev_test_flag(hdev, HCI_UNREGISTER))
397 return;
398
399 /* No point in doing scanning if LE support hasn't been enabled */
400 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
401 return;
402
403 /* If discovery is active don't interfere with it */
404 if (hdev->discovery.state != DISCOVERY_STOPPED)
405 return;
406
407 /* Reset RSSI and UUID filters when starting background scanning
408 * since these filters are meant for service discovery only.
409 *
410 * The Start Discovery and Start Service Discovery operations
 411 * ensure that proper values are set for the RSSI threshold and UUID
412 * filter list. So it is safe to just reset them here.
413 */
414 hci_discovery_filter_clear(hdev);
415
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200416 BT_DBG("%s ADV monitoring is %s", hdev->name,
417 hci_is_adv_monitoring(hdev) ? "on" : "off");
418
Johan Hedberg196a5e92015-11-22 18:55:44 +0200419 if (list_empty(&hdev->pend_le_conns) &&
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200420 list_empty(&hdev->pend_le_reports) &&
421 !hci_is_adv_monitoring(hdev)) {
Johan Hedberg196a5e92015-11-22 18:55:44 +0200422 /* If there are no pending LE connections, no devices
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200423 * to be scanned for and no ADV monitors, we should stop the
424 * background scanning.
Johan Hedberg196a5e92015-11-22 18:55:44 +0200425 */
426
427 /* If controller is not scanning we are done. */
428 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
429 return;
430
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +0530431 hci_req_add_le_scan_disable(req, false);
Johan Hedberg196a5e92015-11-22 18:55:44 +0200432
433 BT_DBG("%s stopping background scanning", hdev->name);
434 } else {
435 /* If there is at least one pending LE connection, we should
436 * keep the background scan running.
437 */
438
439 /* If controller is connecting, we should not start scanning
440 * since some controllers are not able to scan and connect at
441 * the same time.
442 */
443 if (hci_lookup_le_connect(hdev))
444 return;
445
446 /* If controller is currently scanning, we stop it to ensure we
447 * don't miss any advertising (due to duplicates filter).
448 */
449 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +0530450 hci_req_add_le_scan_disable(req, false);
Johan Hedberg196a5e92015-11-22 18:55:44 +0200451
452 hci_req_add_le_passive_scan(req);
453
454 BT_DBG("%s starting background scanning", hdev->name);
455 }
456}
457
Johan Hedberg00cf5042015-11-25 16:15:41 +0200458void __hci_req_update_name(struct hci_request *req)
459{
460 struct hci_dev *hdev = req->hdev;
461 struct hci_cp_write_local_name cp;
462
463 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
464
465 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
466}
467
Johan Hedbergb1a89172015-11-25 16:15:42 +0200468#define PNP_INFO_SVCLASS_ID 0x1200
469
470static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
471{
472 u8 *ptr = data, *uuids_start = NULL;
473 struct bt_uuid *uuid;
474
475 if (len < 4)
476 return ptr;
477
478 list_for_each_entry(uuid, &hdev->uuids, list) {
479 u16 uuid16;
480
481 if (uuid->size != 16)
482 continue;
483
484 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
485 if (uuid16 < 0x1100)
486 continue;
487
488 if (uuid16 == PNP_INFO_SVCLASS_ID)
489 continue;
490
491 if (!uuids_start) {
492 uuids_start = ptr;
493 uuids_start[0] = 1;
494 uuids_start[1] = EIR_UUID16_ALL;
495 ptr += 2;
496 }
497
498 /* Stop if not enough space to put next UUID */
499 if ((ptr - data) + sizeof(u16) > len) {
500 uuids_start[1] = EIR_UUID16_SOME;
501 break;
502 }
503
504 *ptr++ = (uuid16 & 0x00ff);
505 *ptr++ = (uuid16 & 0xff00) >> 8;
506 uuids_start[0] += sizeof(uuid16);
507 }
508
509 return ptr;
510}
511
512static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
513{
514 u8 *ptr = data, *uuids_start = NULL;
515 struct bt_uuid *uuid;
516
517 if (len < 6)
518 return ptr;
519
520 list_for_each_entry(uuid, &hdev->uuids, list) {
521 if (uuid->size != 32)
522 continue;
523
524 if (!uuids_start) {
525 uuids_start = ptr;
526 uuids_start[0] = 1;
527 uuids_start[1] = EIR_UUID32_ALL;
528 ptr += 2;
529 }
530
531 /* Stop if not enough space to put next UUID */
532 if ((ptr - data) + sizeof(u32) > len) {
533 uuids_start[1] = EIR_UUID32_SOME;
534 break;
535 }
536
537 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
538 ptr += sizeof(u32);
539 uuids_start[0] += sizeof(u32);
540 }
541
542 return ptr;
543}
544
545static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
546{
547 u8 *ptr = data, *uuids_start = NULL;
548 struct bt_uuid *uuid;
549
550 if (len < 18)
551 return ptr;
552
553 list_for_each_entry(uuid, &hdev->uuids, list) {
554 if (uuid->size != 128)
555 continue;
556
557 if (!uuids_start) {
558 uuids_start = ptr;
559 uuids_start[0] = 1;
560 uuids_start[1] = EIR_UUID128_ALL;
561 ptr += 2;
562 }
563
564 /* Stop if not enough space to put next UUID */
565 if ((ptr - data) + 16 > len) {
566 uuids_start[1] = EIR_UUID128_SOME;
567 break;
568 }
569
570 memcpy(ptr, uuid->uuid, 16);
571 ptr += 16;
572 uuids_start[0] += 16;
573 }
574
575 return ptr;
576}
577
578static void create_eir(struct hci_dev *hdev, u8 *data)
579{
580 u8 *ptr = data;
581 size_t name_len;
582
583 name_len = strlen(hdev->dev_name);
584
585 if (name_len > 0) {
586 /* EIR Data type */
587 if (name_len > 48) {
588 name_len = 48;
589 ptr[1] = EIR_NAME_SHORT;
590 } else
591 ptr[1] = EIR_NAME_COMPLETE;
592
593 /* EIR Data length */
594 ptr[0] = name_len + 1;
595
596 memcpy(ptr + 2, hdev->dev_name, name_len);
597
598 ptr += (name_len + 2);
599 }
600
601 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
602 ptr[0] = 2;
603 ptr[1] = EIR_TX_POWER;
604 ptr[2] = (u8) hdev->inq_tx_power;
605
606 ptr += 3;
607 }
608
609 if (hdev->devid_source > 0) {
610 ptr[0] = 9;
611 ptr[1] = EIR_DEVICE_ID;
612
613 put_unaligned_le16(hdev->devid_source, ptr + 2);
614 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
615 put_unaligned_le16(hdev->devid_product, ptr + 6);
616 put_unaligned_le16(hdev->devid_version, ptr + 8);
617
618 ptr += 10;
619 }
620
621 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
623 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624}
625
626void __hci_req_update_eir(struct hci_request *req)
627{
628 struct hci_dev *hdev = req->hdev;
629 struct hci_cp_write_eir cp;
630
631 if (!hdev_is_powered(hdev))
632 return;
633
634 if (!lmp_ext_inq_capable(hdev))
635 return;
636
637 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
638 return;
639
640 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
641 return;
642
643 memset(&cp, 0, sizeof(cp));
644
645 create_eir(hdev, cp.data);
646
647 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
648 return;
649
650 memcpy(hdev->eir, cp.data, sizeof(cp.data));
651
652 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
653}
654
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +0530655void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
Johan Hedberg0857dd32014-12-19 13:40:20 +0200656{
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530657 struct hci_dev *hdev = req->hdev;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200658
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700659 if (hdev->scanning_paused) {
660 bt_dev_dbg(hdev, "Scanning is paused for suspend");
661 return;
662 }
663
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530664 if (use_ext_scan(hdev)) {
665 struct hci_cp_le_set_ext_scan_enable cp;
666
667 memset(&cp, 0, sizeof(cp));
668 cp.enable = LE_SCAN_DISABLE;
669 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
670 &cp);
671 } else {
672 struct hci_cp_le_set_scan_enable cp;
673
674 memset(&cp, 0, sizeof(cp));
675 cp.enable = LE_SCAN_DISABLE;
676 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
677 }
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530678
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +0530679 /* Disable address resolution */
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530680 if (use_ll_privacy(hdev) &&
Sathish Narasimmancbbdfa62020-07-23 18:09:03 +0530681 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +0530682 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530683 __u8 enable = 0x00;
Sathish Narasimmancbbdfa62020-07-23 18:09:03 +0530684
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530685 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
686 }
Johan Hedberg0857dd32014-12-19 13:40:20 +0200687}
688
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700689static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
690 u8 bdaddr_type)
691{
692 struct hci_cp_le_del_from_white_list cp;
693
694 cp.bdaddr_type = bdaddr_type;
695 bacpy(&cp.bdaddr, bdaddr);
696
697 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
698 cp.bdaddr_type);
699 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
Marcel Holtmann0eee35b2020-07-23 18:08:58 +0530700
701 if (use_ll_privacy(req->hdev)) {
702 struct smp_irk *irk;
703
704 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
705 if (irk) {
706 struct hci_cp_le_del_from_resolv_list cp;
707
708 cp.bdaddr_type = bdaddr_type;
709 bacpy(&cp.bdaddr, bdaddr);
710
711 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
712 sizeof(cp), &cp);
713 }
714 }
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700715}
716
717/* Adds connection to white list if needed. On error, returns -1. */
718static int add_to_white_list(struct hci_request *req,
719 struct hci_conn_params *params, u8 *num_entries,
720 bool allow_rpa)
Johan Hedberg0857dd32014-12-19 13:40:20 +0200721{
722 struct hci_cp_le_add_to_white_list cp;
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700723 struct hci_dev *hdev = req->hdev;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200724
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700725 /* Already in white list */
726 if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
727 params->addr_type))
728 return 0;
729
730 /* Select filter policy to accept all advertising */
731 if (*num_entries >= hdev->le_white_list_size)
732 return -1;
733
734 /* White list can not be used with RPAs */
Marcel Holtmann0eee35b2020-07-23 18:08:58 +0530735 if (!allow_rpa && !use_ll_privacy(hdev) &&
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700736 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
737 return -1;
738 }
739
740 /* During suspend, only wakeable devices can be in whitelist */
Abhishek Pandit-Subedia1fc7532020-06-17 16:39:10 +0200741 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
742 params->current_flags))
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700743 return 0;
744
745 *num_entries += 1;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200746 cp.bdaddr_type = params->addr_type;
747 bacpy(&cp.bdaddr, &params->addr);
748
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700749 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
750 cp.bdaddr_type);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200751 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700752
Marcel Holtmann0eee35b2020-07-23 18:08:58 +0530753 if (use_ll_privacy(hdev)) {
754 struct smp_irk *irk;
755
756 irk = hci_find_irk_by_addr(hdev, &params->addr,
757 params->addr_type);
758 if (irk) {
759 struct hci_cp_le_add_to_resolv_list cp;
760
761 cp.bdaddr_type = params->addr_type;
762 bacpy(&cp.bdaddr, &params->addr);
763 memcpy(cp.peer_irk, irk->val, 16);
764
765 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
766 memcpy(cp.local_irk, hdev->irk, 16);
767 else
768 memset(cp.local_irk, 0, 16);
769
770 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
771 sizeof(cp), &cp);
772 }
773 }
774
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700775 return 0;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200776}
777
778static u8 update_white_list(struct hci_request *req)
779{
780 struct hci_dev *hdev = req->hdev;
781 struct hci_conn_params *params;
782 struct bdaddr_list *b;
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700783 u8 num_entries = 0;
784 bool pend_conn, pend_report;
785 /* We allow whitelisting even with RPAs in suspend. In the worst case,
 786 * we won't be able to wake from devices that use the privacy 1.2
 787 * features. Additionally, once we support privacy 1.2 and IRK
788 * offloading, we can update this to also check for those conditions.
789 */
790 bool allow_rpa = hdev->suspended;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200791
792 /* Go through the current white list programmed into the
793 * controller one by one and check if that address is still
794 * in the list of pending connections or list of devices to
795 * report. If not present in either list, then queue the
796 * command to remove it from the controller.
797 */
798 list_for_each_entry(b, &hdev->le_white_list, list) {
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700799 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
800 &b->bdaddr,
801 b->bdaddr_type);
802 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
803 &b->bdaddr,
804 b->bdaddr_type);
805
806 /* If the device is not likely to connect or report,
807 * remove it from the whitelist.
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500808 */
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700809 if (!pend_conn && !pend_report) {
810 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200811 continue;
812 }
813
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700814 /* White list can not be used with RPAs */
Marcel Holtmann0eee35b2020-07-23 18:08:58 +0530815 if (!allow_rpa && !use_ll_privacy(hdev) &&
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700816 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500817 return 0x00;
818 }
Johan Hedberg0857dd32014-12-19 13:40:20 +0200819
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700820 num_entries++;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200821 }
822
823 /* Since all no longer valid white list entries have been
824 * removed, walk through the list of pending connections
825 * and ensure that any new device gets programmed into
826 * the controller.
827 *
828 * If the list of the devices is larger than the list of
829 * available white list entries in the controller, then
 830 * just abort and return the filter policy value to not use the
831 * white list.
832 */
833 list_for_each_entry(params, &hdev->pend_le_conns, action) {
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700834 if (add_to_white_list(req, params, &num_entries, allow_rpa))
Johan Hedberg0857dd32014-12-19 13:40:20 +0200835 return 0x00;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200836 }
837
838 /* After adding all new pending connections, walk through
839 * the list of pending reports and also add these to the
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700840 * white list if there is still space. Abort if space runs out.
Johan Hedberg0857dd32014-12-19 13:40:20 +0200841 */
842 list_for_each_entry(params, &hdev->pend_le_reports, action) {
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700843 if (add_to_white_list(req, params, &num_entries, allow_rpa))
Johan Hedberg0857dd32014-12-19 13:40:20 +0200844 return 0x00;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200845 }
846
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200847 /* Once the controller offloading of advertisement monitor is in place,
 848 * the if condition should include support for the MSFT
Miao-chen Chou51b64c42020-07-07 15:52:28 -0700849 * extension. If suspend is ongoing, whitelist should be the default to
850 * prevent waking by random advertisements.
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200851 */
Miao-chen Chou51b64c42020-07-07 15:52:28 -0700852 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200853 return 0x00;
854
Johan Hedberg0857dd32014-12-19 13:40:20 +0200855 /* Select filter policy to use white list */
856 return 0x01;
857}
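/* Editor's note -- illustrative summary, not part of the original file.
 * The return value of update_white_list() feeds directly into the LE scan
 * filter_policy: 0x00 means "accept all advertisements" (the white list
 * cannot be used, e.g. it overflowed or an RPA was involved), while 0x01
 * means "only accept advertisements from devices on the white list".
 */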
858
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200859static bool scan_use_rpa(struct hci_dev *hdev)
860{
861 return hci_dev_test_flag(hdev, HCI_PRIVACY);
862}
863
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530864static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530865 u16 window, u8 own_addr_type, u8 filter_policy,
866 bool addr_resolv)
Johan Hedberg0857dd32014-12-19 13:40:20 +0200867{
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530868 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530869
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -0700870 if (hdev->scanning_paused) {
871 bt_dev_dbg(hdev, "Scanning is paused for suspend");
872 return;
873 }
874
Sathish Narasimmancbbdfa62020-07-23 18:09:03 +0530875 if (use_ll_privacy(hdev) &&
876 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
877 addr_resolv) {
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530878 u8 enable = 0x01;
Sathish Narasimmancbbdfa62020-07-23 18:09:03 +0530879
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530880 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
881 }
882
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530883 /* Use extended scanning if both the LE Set Extended Scan Parameters
 884 * and LE Set Extended Scan Enable commands are supported
885 */
886 if (use_ext_scan(hdev)) {
887 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
888 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
889 struct hci_cp_le_scan_phy_params *phy_params;
Jaganath Kanakkassery45bdd862018-07-19 17:09:37 +0530890 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
891 u32 plen;
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530892
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530893 ext_param_cp = (void *)data;
894 phy_params = (void *)ext_param_cp->data;
895
896 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
897 ext_param_cp->own_addr_type = own_addr_type;
898 ext_param_cp->filter_policy = filter_policy;
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530899
Jaganath Kanakkassery45bdd862018-07-19 17:09:37 +0530900 plen = sizeof(*ext_param_cp);
901
902 if (scan_1m(hdev) || scan_2m(hdev)) {
903 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
904
905 memset(phy_params, 0, sizeof(*phy_params));
906 phy_params->type = type;
907 phy_params->interval = cpu_to_le16(interval);
908 phy_params->window = cpu_to_le16(window);
909
910 plen += sizeof(*phy_params);
911 phy_params++;
912 }
913
914 if (scan_coded(hdev)) {
915 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
916
917 memset(phy_params, 0, sizeof(*phy_params));
918 phy_params->type = type;
919 phy_params->interval = cpu_to_le16(interval);
920 phy_params->window = cpu_to_le16(window);
921
922 plen += sizeof(*phy_params);
923 phy_params++;
924 }
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530925
926 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
Jaganath Kanakkassery45bdd862018-07-19 17:09:37 +0530927 plen, ext_param_cp);
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530928
929 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
930 ext_enable_cp.enable = LE_SCAN_ENABLE;
931 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
932
933 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
934 sizeof(ext_enable_cp), &ext_enable_cp);
935 } else {
936 struct hci_cp_le_set_scan_param param_cp;
937 struct hci_cp_le_set_scan_enable enable_cp;
938
939 memset(&param_cp, 0, sizeof(param_cp));
940 param_cp.type = type;
941 param_cp.interval = cpu_to_le16(interval);
942 param_cp.window = cpu_to_le16(window);
943 param_cp.own_address_type = own_addr_type;
944 param_cp.filter_policy = filter_policy;
945 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
946 &param_cp);
947
948 memset(&enable_cp, 0, sizeof(enable_cp));
949 enable_cp.enable = LE_SCAN_ENABLE;
950 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
951 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
952 &enable_cp);
953 }
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530954}
955
Alain Michaud9a9373f2020-07-31 01:05:34 +0000956/* Returns true if an LE connection is in the scanning state */
957static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
958{
959 struct hci_conn_hash *h = &hdev->conn_hash;
960 struct hci_conn *c;
961
962 rcu_read_lock();
963
964 list_for_each_entry_rcu(c, &h->list, list) {
965 if (c->type == LE_LINK && c->state == BT_CONNECT &&
966 test_bit(HCI_CONN_SCANNING, &c->flags)) {
967 rcu_read_unlock();
968 return true;
969 }
970 }
971
972 rcu_read_unlock();
973
974 return false;
975}
976
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530977/* Ensure to call hci_req_add_le_scan_disable() first to disable the
978 * controller based address resolution to be able to reconfigure
979 * resolving list.
980 */
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530981void hci_req_add_le_passive_scan(struct hci_request *req)
982{
Johan Hedberg0857dd32014-12-19 13:40:20 +0200983 struct hci_dev *hdev = req->hdev;
984 u8 own_addr_type;
985 u8 filter_policy;
Abhishek Pandit-Subediaaebf8e2020-05-12 19:09:32 -0700986 u16 window, interval;
Marcel Holtmanne1d57232020-07-23 18:08:57 +0530987 /* Background scanning should run with address resolution */
988 bool addr_resolv = true;
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -0700989
990 if (hdev->scanning_paused) {
991 bt_dev_dbg(hdev, "Scanning is paused for suspend");
992 return;
993 }
Johan Hedberg0857dd32014-12-19 13:40:20 +0200994
 995 /* Set require_privacy to false since no SCAN_REQ PDUs are sent
 996 * during passive scanning. Not using a non-resolvable address
997 * here is important so that peer devices using direct
998 * advertising with our address will be correctly reported
999 * by the controller.
1000 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001001 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1002 &own_addr_type))
Johan Hedberg0857dd32014-12-19 13:40:20 +02001003 return;
1004
1005 /* Adding or removing entries from the white list must
1006 * happen before enabling scanning. The controller does
1007 * not allow white list modification while scanning.
1008 */
1009 filter_policy = update_white_list(req);
1010
1011 /* When the controller is using random resolvable addresses and
1012 * with that having LE privacy enabled, then controllers with
1013 * Extended Scanner Filter Policies support can now enable support
1014 * for handling directed advertising.
1015 *
1016 * So instead of using filter polices 0x00 (no whitelist)
1017 * and 0x01 (whitelist enabled) use the new filter policies
1018 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1019 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001020 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001021 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1022 filter_policy |= 0x02;
1023
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001024 if (hdev->suspended) {
Alain Michaud10873f92020-06-11 02:01:56 +00001025 window = hdev->le_scan_window_suspend;
1026 interval = hdev->le_scan_int_suspend;
Alain Michaud9a9373f2020-07-31 01:05:34 +00001027 } else if (hci_is_le_conn_scanning(hdev)) {
1028 window = hdev->le_scan_window_connect;
1029 interval = hdev->le_scan_int_connect;
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001030 } else {
1031 window = hdev->le_scan_window;
1032 interval = hdev->le_scan_interval;
1033 }
1034
1035 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1036 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
Marcel Holtmanne1d57232020-07-23 18:08:57 +05301037 own_addr_type, filter_policy, addr_resolv);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001038}
1039
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301040static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1041{
1042 struct adv_info *adv_instance;
1043
Luiz Augusto von Dentz492ad782019-10-24 16:15:43 +03001044 /* Instance 0x00 always sets the local name */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301045 if (instance == 0x00)
Luiz Augusto von Dentz492ad782019-10-24 16:15:43 +03001046 return 1;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301047
1048 adv_instance = hci_find_adv_instance(hdev, instance);
1049 if (!adv_instance)
1050 return 0;
1051
1052 /* TODO: Take into account the "appearance" and "local-name" flags here.
1053 * These are currently being ignored as they are not supported.
1054 */
1055 return adv_instance->scan_rsp_len;
1056}
1057
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001058static void hci_req_clear_event_filter(struct hci_request *req)
1059{
1060 struct hci_cp_set_event_filter f;
1061
1062 memset(&f, 0, sizeof(f));
1063 f.flt_type = HCI_FLT_CLEAR_ALL;
1064 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1065
1066 /* Update page scan state (since we may have modified it when setting
1067 * the event filter).
1068 */
1069 __hci_req_update_scan(req);
1070}
1071
1072static void hci_req_set_event_filter(struct hci_request *req)
1073{
Abhishek Pandit-Subedi7a929062020-06-17 16:39:09 +02001074 struct bdaddr_list_with_flags *b;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001075 struct hci_cp_set_event_filter f;
1076 struct hci_dev *hdev = req->hdev;
Abhishek Pandit-Subedi7a929062020-06-17 16:39:09 +02001077 u8 scan = SCAN_DISABLED;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001078
1079 /* Always clear event filter when starting */
1080 hci_req_clear_event_filter(req);
1081
Abhishek Pandit-Subedi7a929062020-06-17 16:39:09 +02001082 list_for_each_entry(b, &hdev->whitelist, list) {
1083 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1084 b->current_flags))
1085 continue;
1086
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001087 memset(&f, 0, sizeof(f));
1088 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1089 f.flt_type = HCI_FLT_CONN_SETUP;
1090 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1091 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1092
1093 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1094 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
Abhishek Pandit-Subedi7a929062020-06-17 16:39:09 +02001095 scan = SCAN_PAGE;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001096 }
1097
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001098 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1099}
1100
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001101static void hci_req_config_le_suspend_scan(struct hci_request *req)
1102{
Manish Mandlik6fb00d42020-06-01 18:42:51 -07001103 /* Before changing params disable scan if enabled */
1104 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301105 hci_req_add_le_scan_disable(req, false);
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001106
1107 /* Configure params and enable scanning */
1108 hci_req_add_le_passive_scan(req);
1109
1110 /* Block suspend notifier on response */
1111 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1112}
1113
Daniel Winkler53274472020-09-15 14:14:27 -07001114static void cancel_adv_timeout(struct hci_dev *hdev)
1115{
1116 if (hdev->adv_instance_timeout) {
1117 hdev->adv_instance_timeout = 0;
1118 cancel_delayed_work(&hdev->adv_instance_expire);
1119 }
1120}
1121
1122/* This function requires the caller holds hdev->lock */
1123static void hci_suspend_adv_instances(struct hci_request *req)
1124{
1125 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1126
1127 /* Call to disable any advertisements active on the controller.
1128 * This will succeed even if no advertisements are configured.
1129 */
1130 __hci_req_disable_advertising(req);
1131
1132 /* If we are using software rotation, pause the loop */
1133 if (!ext_adv_capable(req->hdev))
1134 cancel_adv_timeout(req->hdev);
1135}
1136
1137/* This function requires the caller holds hdev->lock */
1138static void hci_resume_adv_instances(struct hci_request *req)
1139{
1140 struct adv_info *adv;
1141
1142 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1143
1144 if (ext_adv_capable(req->hdev)) {
1145 /* Call for each tracked instance to be re-enabled */
1146 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1147 __hci_req_enable_ext_advertising(req,
1148 adv->instance);
1149 }
1150
1151 } else {
1152 /* Schedule for most recent instance to be restarted and begin
1153 * the software rotation loop
1154 */
1155 __hci_req_schedule_adv_instance(req,
1156 req->hdev->cur_adv_instance,
1157 true);
1158 }
1159}
1160
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001161static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1162{
1163 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1164 status);
1165 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1166 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1167 wake_up(&hdev->suspend_wait_q);
1168 }
1169}
1170
Abhishek Pandit-Subedi9952d902020-03-11 08:54:00 -07001171/* Call with hci_dev_lock */
1172void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1173{
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001174 int old_state;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001175 struct hci_conn *conn;
1176 struct hci_request req;
1177 u8 page_scan;
1178 int disconnect_counter;
1179
Abhishek Pandit-Subedi9952d902020-03-11 08:54:00 -07001180 if (next == hdev->suspend_state) {
1181 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1182 goto done;
1183 }
1184
1185 hdev->suspend_state = next;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001186 hci_req_init(&req, hdev);
1187
1188 if (next == BT_SUSPEND_DISCONNECT) {
1189 /* Mark device as suspended */
1190 hdev->suspended = true;
1191
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001192 /* Pause discovery if not already stopped */
1193 old_state = hdev->discovery.state;
1194 if (old_state != DISCOVERY_STOPPED) {
1195 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1196 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1197 queue_work(hdev->req_workqueue, &hdev->discov_update);
1198 }
1199
1200 hdev->discovery_paused = true;
1201 hdev->discovery_old_state = old_state;
1202
Daniel Winkler53274472020-09-15 14:14:27 -07001203 /* Stop directed advertising */
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001204 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1205 if (old_state) {
1206 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1207 cancel_delayed_work(&hdev->discov_off);
1208 queue_delayed_work(hdev->req_workqueue,
1209 &hdev->discov_off, 0);
1210 }
1211
Daniel Winkler53274472020-09-15 14:14:27 -07001212 /* Pause other advertisements */
1213 if (hdev->adv_instance_cnt)
1214 hci_suspend_adv_instances(&req);
1215
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001216 hdev->advertising_paused = true;
1217 hdev->advertising_old_state = old_state;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001218 /* Disable page scan */
1219 page_scan = SCAN_DISABLED;
1220 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1221
Manish Mandlik6fb00d42020-06-01 18:42:51 -07001222 /* Disable LE passive scan if enabled */
1223 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301224 hci_req_add_le_scan_disable(&req, false);
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001225
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001226 /* Mark task needing completion */
1227 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1228
1229 /* Prevent disconnects from causing scanning to be re-enabled */
1230 hdev->scanning_paused = true;
1231
1232 /* Run commands before disconnecting */
1233 hci_req_run(&req, suspend_req_complete);
1234
1235 disconnect_counter = 0;
1236 /* Soft disconnect everything (power off) */
1237 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1238 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1239 disconnect_counter++;
1240 }
1241
1242 if (disconnect_counter > 0) {
1243 bt_dev_dbg(hdev,
1244 "Had %d disconnects. Will wait on them",
1245 disconnect_counter);
1246 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1247 }
Abhishek Pandit-Subedi0d2c9822020-05-12 19:19:25 -07001248 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001249 /* Unpause to take care of updating scanning params */
1250 hdev->scanning_paused = false;
1251 /* Enable event filter for paired devices */
1252 hci_req_set_event_filter(&req);
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001253 /* Enable passive scan at lower duty cycle */
1254 hci_req_config_le_suspend_scan(&req);
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001255 /* Pause scan changes again. */
1256 hdev->scanning_paused = true;
1257 hci_req_run(&req, suspend_req_complete);
1258 } else {
1259 hdev->suspended = false;
1260 hdev->scanning_paused = false;
1261
1262 hci_req_clear_event_filter(&req);
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001263 /* Reset passive/background scanning to normal */
1264 hci_req_config_le_suspend_scan(&req);
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001265
Daniel Winkler53274472020-09-15 14:14:27 -07001266 /* Unpause directed advertising */
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001267 hdev->advertising_paused = false;
1268 if (hdev->advertising_old_state) {
1269 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1270 hdev->suspend_tasks);
1271 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1272 queue_work(hdev->req_workqueue,
1273 &hdev->discoverable_update);
1274 hdev->advertising_old_state = 0;
1275 }
1276
Daniel Winkler53274472020-09-15 14:14:27 -07001277 /* Resume other advertisements */
1278 if (hdev->adv_instance_cnt)
1279 hci_resume_adv_instances(&req);
1280
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001281 /* Unpause discovery */
1282 hdev->discovery_paused = false;
1283 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1284 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1285 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1286 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1287 queue_work(hdev->req_workqueue, &hdev->discov_update);
1288 }
1289
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001290 hci_req_run(&req, suspend_req_complete);
1291 }
1292
1293 hdev->suspend_state = next;
Abhishek Pandit-Subedi9952d902020-03-11 08:54:00 -07001294
1295done:
1296 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1297 wake_up(&hdev->suspend_wait_q);
1298}
1299
Johan Hedbergf2252572015-11-18 12:49:20 +02001300static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1301{
Johan Hedbergcab054a2015-11-30 11:21:45 +02001302 u8 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001303 struct adv_info *adv_instance;
1304
Luiz Augusto von Dentz492ad782019-10-24 16:15:43 +03001305 /* Instance 0x00 always sets the local name */
Johan Hedbergf2252572015-11-18 12:49:20 +02001306 if (instance == 0x00)
Luiz Augusto von Dentz492ad782019-10-24 16:15:43 +03001307 return 1;
Johan Hedbergf2252572015-11-18 12:49:20 +02001308
1309 adv_instance = hci_find_adv_instance(hdev, instance);
1310 if (!adv_instance)
1311 return 0;
1312
1313 /* TODO: Take into account the "appearance" and "local-name" flags here.
1314 * These are currently being ignored as they are not supported.
1315 */
1316 return adv_instance->scan_rsp_len;
1317}
1318
1319void __hci_req_disable_advertising(struct hci_request *req)
1320{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301321 if (ext_adv_capable(req->hdev)) {
Daniel Winkler37adf702020-07-14 14:16:00 -07001322 __hci_req_disable_ext_adv_instance(req, 0x00);
Johan Hedbergf2252572015-11-18 12:49:20 +02001323
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301324 } else {
1325 u8 enable = 0x00;
1326
1327 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1328 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001329}
1330
1331static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1332{
1333 u32 flags;
1334 struct adv_info *adv_instance;
1335
1336 if (instance == 0x00) {
1337 /* Instance 0 always manages the "Tx Power" and "Flags"
1338 * fields
1339 */
1340 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1341
1342 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1343 * corresponds to the "connectable" instance flag.
1344 */
1345 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1346 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1347
Johan Hedberg6a19cc82016-03-11 09:56:32 +02001348 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1349 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1350 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedbergd43efbd2016-03-09 17:30:33 +02001351 flags |= MGMT_ADV_FLAG_DISCOV;
1352
Johan Hedbergf2252572015-11-18 12:49:20 +02001353 return flags;
1354 }
1355
1356 adv_instance = hci_find_adv_instance(hdev, instance);
1357
1358 /* Return 0 when we got an invalid instance identifier. */
1359 if (!adv_instance)
1360 return 0;
1361
1362 return adv_instance->flags;
1363}
1364
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001365static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1366{
1367 /* If privacy is not enabled don't use RPA */
1368 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1369 return false;
1370
1371 /* If basic privacy mode is enabled use RPA */
1372 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1373 return true;
1374
1375 /* If limited privacy mode is enabled don't use RPA if we're
1376 * both discoverable and bondable.
1377 */
1378 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1379 hci_dev_test_flag(hdev, HCI_BONDABLE))
1380 return false;
1381
1382 /* We're neither bondable nor discoverable in the limited
1383 * privacy mode, therefore use RPA.
1384 */
1385 return true;
1386}
1387
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001388static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1389{
1390 /* If there is no connection we are OK to advertise. */
1391 if (hci_conn_num(hdev, LE_LINK) == 0)
1392 return true;
1393
1394 /* Check le_states if there is any connection in slave role. */
1395 if (hdev->conn_hash.le_num_slave > 0) {
1396 /* Slave connection state and non connectable mode bit 20. */
1397 if (!connectable && !(hdev->le_states[2] & 0x10))
1398 return false;
1399
1400 /* Slave connection state and connectable mode bit 38
1401 * and scannable bit 21.
1402 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001403 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1404 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001405 return false;
1406 }
1407
1408 /* Check le_states if there is any connection in master role. */
1409 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1410 /* Master connection state and non connectable mode bit 18. */
1411 if (!connectable && !(hdev->le_states[2] & 0x02))
1412 return false;
1413
1414 /* Master connection state and connectable mode bit 35 and
1415 * scannable 19.
1416 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001417 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001418 !(hdev->le_states[2] & 0x08)))
1419 return false;
1420 }
1421
1422 return true;
1423}
1424
Johan Hedbergf2252572015-11-18 12:49:20 +02001425void __hci_req_enable_advertising(struct hci_request *req)
1426{
1427 struct hci_dev *hdev = req->hdev;
1428 struct hci_cp_le_set_adv_param cp;
1429 u8 own_addr_type, enable = 0x01;
1430 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301431 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001432 u32 flags;
1433
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001434 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1435
1436 /* If the "connectable" instance flag was not set, then choose between
1437 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1438 */
1439 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1440 mgmt_get_connectable(hdev);
1441
1442 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001443 return;
1444
1445 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1446 __hci_req_disable_advertising(req);
1447
1448 /* Clear the HCI_LE_ADV bit temporarily so that the
1449 * hci_update_random_address knows that it's safe to go ahead
1450 * and write a new random address. The flag will be set back on
1451 * as soon as the SET_ADV_ENABLE HCI command completes.
1452 */
1453 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1454
Johan Hedbergf2252572015-11-18 12:49:20 +02001455 /* Set require_privacy to true only when non-connectable
1456 * advertising is used. In that case it is fine to use a
1457 * non-resolvable private address.
1458 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001459 if (hci_update_random_address(req, !connectable,
1460 adv_use_rpa(hdev, flags),
1461 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001462 return;
1463
1464 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001465
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301466 if (connectable) {
Johan Hedbergf2252572015-11-18 12:49:20 +02001467 cp.type = LE_ADV_IND;
Johan Hedbergf2252572015-11-18 12:49:20 +02001468
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301469 adv_min_interval = hdev->le_adv_min_interval;
1470 adv_max_interval = hdev->le_adv_max_interval;
1471 } else {
1472 if (get_cur_adv_instance_scan_rsp_len(hdev))
1473 cp.type = LE_ADV_SCAN_IND;
1474 else
1475 cp.type = LE_ADV_NONCONN_IND;
1476
1477 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1478 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1479 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1480 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1481 } else {
1482 adv_min_interval = hdev->le_adv_min_interval;
1483 adv_max_interval = hdev->le_adv_max_interval;
1484 }
1485 }
1486
1487 cp.min_interval = cpu_to_le16(adv_min_interval);
1488 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001489 cp.own_address_type = own_addr_type;
1490 cp.channel_map = hdev->le_adv_channel_map;
1491
1492 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1493
1494 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1495}
1496
Michał Narajowskif61851f2016-10-19 10:20:27 +02001497u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001498{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001499 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001500 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001501
Michał Narajowskif61851f2016-10-19 10:20:27 +02001502 /* no space left for name (+ NULL + type + len) */
1503 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1504 return ad_len;
1505
1506 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001507 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001508 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001509 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001510 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001511
Michał Narajowskif61851f2016-10-19 10:20:27 +02001512 /* use short name if present */
1513 short_len = strlen(hdev->short_name);
1514 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001515 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001516 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001517
Michał Narajowskif61851f2016-10-19 10:20:27 +02001518	/* use a shortened full name if present; we already know that the
1519	 * name is longer than HCI_MAX_SHORT_NAME_LENGTH
1520 */
1521 if (complete_len) {
1522 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1523
1524 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1525 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1526
1527 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1528 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001529 }
1530
1531 return ad_len;
1532}
1533
Michał Narajowski1b422062016-10-05 12:28:27 +02001534static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1535{
1536 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1537}
1538
Michał Narajowski7c295c42016-09-18 12:50:02 +02001539static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1540{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001541 u8 scan_rsp_len = 0;
1542
1543 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001544 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001545 }
1546
Michał Narajowski1b422062016-10-05 12:28:27 +02001547 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001548}
1549
Johan Hedbergf2252572015-11-18 12:49:20 +02001550static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1551 u8 *ptr)
1552{
1553 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001554 u32 instance_flags;
1555 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001556
1557 adv_instance = hci_find_adv_instance(hdev, instance);
1558 if (!adv_instance)
1559 return 0;
1560
Michał Narajowski7c295c42016-09-18 12:50:02 +02001561 instance_flags = adv_instance->flags;
1562
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001563 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001564 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001565 }
1566
Michał Narajowski1b422062016-10-05 12:28:27 +02001567 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001568 adv_instance->scan_rsp_len);
1569
Michał Narajowski7c295c42016-09-18 12:50:02 +02001570 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001571
1572 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1573 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1574
1575 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001576}
1577
Johan Hedbergcab054a2015-11-30 11:21:45 +02001578void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001579{
1580 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001581 u8 len;
1582
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1584 return;
1585
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301586 if (ext_adv_capable(hdev)) {
1587 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001588
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301589 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001590
Abhishek Pandit-Subedi6baf8a62020-09-11 15:32:20 -07001591 /* Extended scan response data doesn't allow a response to be
1592 * set if the instance isn't scannable.
1593 */
1594 if (get_adv_instance_scan_rsp_len(hdev, instance))
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301595 len = create_instance_scan_rsp_data(hdev, instance,
1596 cp.data);
1597 else
Abhishek Pandit-Subedi6baf8a62020-09-11 15:32:20 -07001598 len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001599
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301600 if (hdev->scan_rsp_data_len == len &&
1601 !memcmp(cp.data, hdev->scan_rsp_data, len))
1602 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001603
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301604 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1605 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001606
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001607 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301608 cp.length = len;
1609 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1610 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1611
1612 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1613 &cp);
1614 } else {
1615 struct hci_cp_le_set_scan_rsp_data cp;
1616
1617 memset(&cp, 0, sizeof(cp));
1618
1619 if (instance)
1620 len = create_instance_scan_rsp_data(hdev, instance,
1621 cp.data);
1622 else
1623 len = create_default_scan_rsp_data(hdev, cp.data);
1624
1625 if (hdev->scan_rsp_data_len == len &&
1626 !memcmp(cp.data, hdev->scan_rsp_data, len))
1627 return;
1628
1629 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1630 hdev->scan_rsp_data_len = len;
1631
1632 cp.length = len;
1633
1634 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1635 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001636}
1637
Johan Hedbergf2252572015-11-18 12:49:20 +02001638static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1639{
1640 struct adv_info *adv_instance = NULL;
1641 u8 ad_len = 0, flags = 0;
1642 u32 instance_flags;
1643
1644 /* Return 0 when the current instance identifier is invalid. */
1645 if (instance) {
1646 adv_instance = hci_find_adv_instance(hdev, instance);
1647 if (!adv_instance)
1648 return 0;
1649 }
1650
1651 instance_flags = get_adv_instance_flags(hdev, instance);
1652
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001653	/* If the instance already has the flags set, skip adding them
1654	 * again.
1655 */
1656 if (adv_instance && eir_get_data(adv_instance->adv_data,
1657 adv_instance->adv_data_len, EIR_FLAGS,
1658 NULL))
1659 goto skip_flags;
1660
Johan Hedbergf2252572015-11-18 12:49:20 +02001661 /* The Add Advertising command allows userspace to set both the general
1662 * and limited discoverable flags.
1663 */
1664 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1665 flags |= LE_AD_GENERAL;
1666
1667 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1668 flags |= LE_AD_LIMITED;
1669
Johan Hedbergf18ba582016-04-06 13:09:05 +03001670 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1671 flags |= LE_AD_NO_BREDR;
1672
Johan Hedbergf2252572015-11-18 12:49:20 +02001673 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1674 /* If a discovery flag wasn't provided, simply use the global
1675 * settings.
1676 */
1677 if (!flags)
1678 flags |= mgmt_get_adv_discov_flags(hdev);
1679
Johan Hedbergf2252572015-11-18 12:49:20 +02001680 /* If flags would still be empty, then there is no need to
1681	 * include the "Flags" AD field.
1682 */
1683 if (flags) {
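			/* AD structure: length 0x02 (type + one data octet),
			 * the Flags AD type, then the flags value itself.
			 */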
1684 ptr[0] = 0x02;
1685 ptr[1] = EIR_FLAGS;
1686 ptr[2] = flags;
1687
1688 ad_len += 3;
1689 ptr += 3;
1690 }
1691 }
1692
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001693skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001694 if (adv_instance) {
1695 memcpy(ptr, adv_instance->adv_data,
1696 adv_instance->adv_data_len);
1697 ad_len += adv_instance->adv_data_len;
1698 ptr += adv_instance->adv_data_len;
1699 }
1700
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301701 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1702 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001703
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301704 if (ext_adv_capable(hdev)) {
1705 if (adv_instance)
1706 adv_tx_power = adv_instance->tx_power;
1707 else
1708 adv_tx_power = hdev->adv_tx_power;
1709 } else {
1710 adv_tx_power = hdev->adv_tx_power;
1711 }
1712
1713 /* Provide Tx Power only if we can provide a valid value for it */
1714 if (adv_tx_power != HCI_TX_POWER_INVALID) {
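			/* TX Power Level AD structure: length 0x02 (type +
			 * one data octet) followed by the power level in dBm
			 * as a signed octet.
			 */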
1715 ptr[0] = 0x02;
1716 ptr[1] = EIR_TX_POWER;
1717 ptr[2] = (u8)adv_tx_power;
1718
1719 ad_len += 3;
1720 ptr += 3;
1721 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001722 }
1723
1724 return ad_len;
1725}
1726
Johan Hedbergcab054a2015-11-30 11:21:45 +02001727void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001728{
1729 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001730 u8 len;
1731
1732 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1733 return;
1734
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301735 if (ext_adv_capable(hdev)) {
1736 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001737
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301738 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001739
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301740 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001741
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301742 /* There's nothing to do if the data hasn't changed */
1743 if (hdev->adv_data_len == len &&
1744 memcmp(cp.data, hdev->adv_data, len) == 0)
1745 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001746
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301747 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1748 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001749
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301750 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001751 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301752 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1753 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1754
1755 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1756 } else {
1757 struct hci_cp_le_set_adv_data cp;
1758
1759 memset(&cp, 0, sizeof(cp));
1760
1761 len = create_instance_adv_data(hdev, instance, cp.data);
1762
1763 /* There's nothing to do if the data hasn't changed */
1764 if (hdev->adv_data_len == len &&
1765 memcmp(cp.data, hdev->adv_data, len) == 0)
1766 return;
1767
1768 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1769 hdev->adv_data_len = len;
1770
1771 cp.length = len;
1772
1773 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1774 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001775}
1776
Johan Hedbergcab054a2015-11-30 11:21:45 +02001777int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001778{
1779 struct hci_request req;
1780
1781 hci_req_init(&req, hdev);
1782 __hci_req_update_adv_data(&req, instance);
1783
1784 return hci_req_run(&req, NULL);
1785}
1786
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301787static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1788 u16 opcode)
1789{
1790 BT_DBG("%s status %u", hdev->name, status);
1791}
1792
1793void hci_req_disable_address_resolution(struct hci_dev *hdev)
1794{
1795 struct hci_request req;
1796 __u8 enable = 0x00;
1797
1798 if (!use_ll_privacy(hdev) &&
1799 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1800 return;
1801
1802 hci_req_init(&req, hdev);
1803
1804 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1805
1806 hci_req_run(&req, enable_addr_resolution_complete);
1807}
1808
Johan Hedbergf2252572015-11-18 12:49:20 +02001809static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1810{
1811 BT_DBG("%s status %u", hdev->name, status);
1812}
1813
1814void hci_req_reenable_advertising(struct hci_dev *hdev)
1815{
1816 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001817
1818 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001819 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001820 return;
1821
Johan Hedbergf2252572015-11-18 12:49:20 +02001822 hci_req_init(&req, hdev);
1823
Johan Hedbergcab054a2015-11-30 11:21:45 +02001824 if (hdev->cur_adv_instance) {
1825 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1826 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001827 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301828 if (ext_adv_capable(hdev)) {
1829 __hci_req_start_ext_adv(&req, 0x00);
1830 } else {
1831 __hci_req_update_adv_data(&req, 0x00);
1832 __hci_req_update_scan_rsp_data(&req, 0x00);
1833 __hci_req_enable_advertising(&req);
1834 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001835 }
1836
1837 hci_req_run(&req, adv_enable_complete);
1838}
1839
1840static void adv_timeout_expire(struct work_struct *work)
1841{
1842 struct hci_dev *hdev = container_of(work, struct hci_dev,
1843 adv_instance_expire.work);
1844
1845 struct hci_request req;
1846 u8 instance;
1847
1848 BT_DBG("%s", hdev->name);
1849
1850 hci_dev_lock(hdev);
1851
1852 hdev->adv_instance_timeout = 0;
1853
Johan Hedbergcab054a2015-11-30 11:21:45 +02001854 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001855 if (instance == 0x00)
1856 goto unlock;
1857
1858 hci_req_init(&req, hdev);
1859
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001860 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001861
1862 if (list_empty(&hdev->adv_instances))
1863 __hci_req_disable_advertising(&req);
1864
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001865 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001866
1867unlock:
1868 hci_dev_unlock(hdev);
1869}
1870
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301871int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1872 bool use_rpa, struct adv_info *adv_instance,
1873 u8 *own_addr_type, bdaddr_t *rand_addr)
1874{
1875 int err;
1876
1877 bacpy(rand_addr, BDADDR_ANY);
1878
1879 /* If privacy is enabled use a resolvable private address. If
1880 * current RPA has expired then generate a new one.
1881 */
1882 if (use_rpa) {
1883 int to;
1884
1885 *own_addr_type = ADDR_LE_DEV_RANDOM;
1886
1887 if (adv_instance) {
1888 if (!adv_instance->rpa_expired &&
1889 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1890 return 0;
1891
1892 adv_instance->rpa_expired = false;
1893 } else {
1894 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1895 !bacmp(&hdev->random_addr, &hdev->rpa))
1896 return 0;
1897 }
1898
1899 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1900 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001901 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301902 return err;
1903 }
1904
1905 bacpy(rand_addr, &hdev->rpa);
1906
1907 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1908 if (adv_instance)
1909 queue_delayed_work(hdev->workqueue,
1910 &adv_instance->rpa_expired_cb, to);
1911 else
1912 queue_delayed_work(hdev->workqueue,
1913 &hdev->rpa_expired, to);
1914
1915 return 0;
1916 }
1917
1918 /* In case of required privacy without resolvable private address,
1919	 * use a non-resolvable private address. This is useful for
1920 * non-connectable advertising.
1921 */
1922 if (require_privacy) {
1923 bdaddr_t nrpa;
1924
1925 while (true) {
1926 /* The non-resolvable private address is generated
1927 * from random six bytes with the two most significant
1928 * bits cleared.
1929 */
1930 get_random_bytes(&nrpa, 6);
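			/* bdaddr_t stores the address little-endian, so b[5]
			 * is the most significant octet; masking it with 0x3f
			 * clears the two most significant bits.
			 */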
1931 nrpa.b[5] &= 0x3f;
1932
1933 /* The non-resolvable private address shall not be
1934 * equal to the public address.
1935 */
1936 if (bacmp(&hdev->bdaddr, &nrpa))
1937 break;
1938 }
1939
1940 *own_addr_type = ADDR_LE_DEV_RANDOM;
1941 bacpy(rand_addr, &nrpa);
1942
1943 return 0;
1944 }
1945
1946 /* No privacy so use a public address. */
1947 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1948
1949 return 0;
1950}
1951
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301952void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1953{
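	/* LE Clear Advertising Sets removes every advertising set
	 * currently configured in the controller.
	 */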
1954 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1955}
1956
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301957int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301958{
1959 struct hci_cp_le_set_ext_adv_params cp;
1960 struct hci_dev *hdev = req->hdev;
1961 bool connectable;
1962 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301963 bdaddr_t random_addr;
1964 u8 own_addr_type;
1965 int err;
1966 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301967 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301968
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301969 if (instance > 0) {
1970 adv_instance = hci_find_adv_instance(hdev, instance);
1971 if (!adv_instance)
1972 return -EINVAL;
1973 } else {
1974 adv_instance = NULL;
1975 }
1976
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301977 flags = get_adv_instance_flags(hdev, instance);
1978
1979 /* If the "connectable" instance flag was not set, then choose between
1980 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1981 */
1982 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1983 mgmt_get_connectable(hdev);
1984
Colin Ian King75edd1f2018-11-09 13:27:36 +00001985 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301986 return -EPERM;
1987
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301988 /* Set require_privacy to true only when non-connectable
1989 * advertising is used. In that case it is fine to use a
1990 * non-resolvable private address.
1991 */
1992 err = hci_get_random_address(hdev, !connectable,
1993 adv_use_rpa(hdev, flags), adv_instance,
1994 &own_addr_type, &random_addr);
1995 if (err < 0)
1996 return err;
1997
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301998 memset(&cp, 0, sizeof(cp));
1999
Alain Michaud5cbd3eb2020-06-22 13:30:28 +00002000	/* In the ext adv set param command the intervals are 3 octets */
2001 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2002 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302003
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302004 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2005
2006 if (connectable) {
2007 if (secondary_adv)
2008 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2009 else
2010 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2011 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2012 if (secondary_adv)
2013 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2014 else
2015 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2016 } else {
2017 if (secondary_adv)
2018 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2019 else
2020 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2021 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302022
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302023 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302024 cp.channel_map = hdev->le_adv_channel_map;
2025 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002026 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302027
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302028 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2029 cp.primary_phy = HCI_ADV_PHY_1M;
2030 cp.secondary_phy = HCI_ADV_PHY_2M;
2031 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2032 cp.primary_phy = HCI_ADV_PHY_CODED;
2033 cp.secondary_phy = HCI_ADV_PHY_CODED;
2034 } else {
2035 /* In all other cases use 1M */
2036 cp.primary_phy = HCI_ADV_PHY_1M;
2037 cp.secondary_phy = HCI_ADV_PHY_1M;
2038 }
2039
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302040 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2041
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302042 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2043 bacmp(&random_addr, BDADDR_ANY)) {
2044 struct hci_cp_le_set_adv_set_rand_addr cp;
2045
2046		/* Check if the random address needs to be updated */
2047 if (adv_instance) {
2048 if (!bacmp(&random_addr, &adv_instance->random_addr))
2049 return 0;
2050 } else {
2051 if (!bacmp(&random_addr, &hdev->random_addr))
2052 return 0;
2053 }
2054
2055 memset(&cp, 0, sizeof(cp));
2056
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07002057 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302058 bacpy(&cp.bdaddr, &random_addr);
2059
2060 hci_req_add(req,
2061 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2062 sizeof(cp), &cp);
2063 }
2064
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302065 return 0;
2066}
2067
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002068int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302069{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002070 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302071 struct hci_cp_le_set_ext_adv_enable *cp;
2072 struct hci_cp_ext_adv_set *adv_set;
2073 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
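	/* The enable command is followed by a variable-length list of
	 * sets; this request always carries exactly one entry.
	 */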
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002074 struct adv_info *adv_instance;
2075
2076 if (instance > 0) {
2077 adv_instance = hci_find_adv_instance(hdev, instance);
2078 if (!adv_instance)
2079 return -EINVAL;
2080 } else {
2081 adv_instance = NULL;
2082 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302083
2084 cp = (void *) data;
2085 adv_set = (void *) cp->data;
2086
2087 memset(cp, 0, sizeof(*cp));
2088
2089 cp->enable = 0x01;
2090 cp->num_of_sets = 0x01;
2091
2092 memset(adv_set, 0, sizeof(*adv_set));
2093
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002094 adv_set->handle = instance;
2095
2096 /* Set duration per instance since controller is responsible for
2097 * scheduling it.
2098 */
2099 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03002100 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002101
2102 /* Time = N * 10 ms */
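		/* e.g. a 5 s instance timeout gives duration = 5000 ms and
		 * N = 500 here.
		 */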
2103 adv_set->duration = cpu_to_le16(duration / 10);
2104 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302105
2106 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2107 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2108 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002109
2110 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302111}
2112
Daniel Winkler37adf702020-07-14 14:16:00 -07002113int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2114{
2115 struct hci_dev *hdev = req->hdev;
2116 struct hci_cp_le_set_ext_adv_enable *cp;
2117 struct hci_cp_ext_adv_set *adv_set;
2118 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2119 u8 req_size;
2120
2121 /* If request specifies an instance that doesn't exist, fail */
2122 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2123 return -EINVAL;
2124
2125 memset(data, 0, sizeof(data));
2126
2127 cp = (void *)data;
2128 adv_set = (void *)cp->data;
2129
2130 /* Instance 0x00 indicates all advertising instances will be disabled */
2131 cp->num_of_sets = !!instance;
2132 cp->enable = 0x00;
2133
2134 adv_set->handle = instance;
2135
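	/* When num_of_sets is 0x00 the set entry is not transmitted:
	 * req_size below only covers the sets actually listed.
	 */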
2136 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2137 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2138
2139 return 0;
2140}
2141
2142int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2143{
2144 struct hci_dev *hdev = req->hdev;
2145
2146 /* If request specifies an instance that doesn't exist, fail */
2147 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2148 return -EINVAL;
2149
2150 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2151
2152 return 0;
2153}
2154
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302155int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2156{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302157 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07002158 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302159 int err;
2160
Daniel Winkler37adf702020-07-14 14:16:00 -07002161	/* If the instance isn't pending, the controller already knows
2162	 * about it and it's safe to disable it first
2163 */
2164 if (adv_instance && !adv_instance->pending)
2165 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302166
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302167 err = __hci_req_setup_ext_adv_instance(req, instance);
2168 if (err < 0)
2169 return err;
2170
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302171 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002172 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302173
2174 return 0;
2175}
2176
Johan Hedbergf2252572015-11-18 12:49:20 +02002177int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2178 bool force)
2179{
2180 struct hci_dev *hdev = req->hdev;
2181 struct adv_info *adv_instance = NULL;
2182 u16 timeout;
2183
2184 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002185 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002186 return -EPERM;
2187
2188 if (hdev->adv_instance_timeout)
2189 return -EBUSY;
2190
2191 adv_instance = hci_find_adv_instance(hdev, instance);
2192 if (!adv_instance)
2193 return -ENOENT;
2194
2195 /* A zero timeout means unlimited advertising. As long as there is
2196 * only one instance, duration should be ignored. We still set a timeout
2197 * in case further instances are being added later on.
2198 *
2199 * If the remaining lifetime of the instance is more than the duration
2200 * then the timeout corresponds to the duration, otherwise it will be
2201 * reduced to the remaining instance lifetime.
2202 */
2203 if (adv_instance->timeout == 0 ||
2204 adv_instance->duration <= adv_instance->remaining_time)
2205 timeout = adv_instance->duration;
2206 else
2207 timeout = adv_instance->remaining_time;
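	/* e.g. a 2 s duration with 10 s of lifetime remaining gives a
	 * 2 s timeout; with only 1 s remaining it drops to 1 s.
	 */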
2208
2209 /* The remaining time is being reduced unless the instance is being
2210 * advertised without time limit.
2211 */
2212 if (adv_instance->timeout)
2213 adv_instance->remaining_time =
2214 adv_instance->remaining_time - timeout;
2215
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002216 /* Only use work for scheduling instances with legacy advertising */
2217 if (!ext_adv_capable(hdev)) {
2218 hdev->adv_instance_timeout = timeout;
2219 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002220 &hdev->adv_instance_expire,
2221 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002222 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002223
2224 /* If we're just re-scheduling the same instance again then do not
2225 * execute any HCI commands. This happens when a single instance is
2226 * being advertised.
2227 */
2228 if (!force && hdev->cur_adv_instance == instance &&
2229 hci_dev_test_flag(hdev, HCI_LE_ADV))
2230 return 0;
2231
2232 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302233 if (ext_adv_capable(hdev)) {
2234 __hci_req_start_ext_adv(req, instance);
2235 } else {
2236 __hci_req_update_adv_data(req, instance);
2237 __hci_req_update_scan_rsp_data(req, instance);
2238 __hci_req_enable_advertising(req);
2239 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002240
2241 return 0;
2242}
2243
Johan Hedbergf2252572015-11-18 12:49:20 +02002244/* For a single instance:
2245 * - force == true: The instance will be removed even when its remaining
2246 * lifetime is not zero.
2247 * - force == false: the instance will be deactivated but kept stored unless
2248 * the remaining lifetime is zero.
2249 *
2250 * For instance == 0x00:
2251 * - force == true: All instances will be removed regardless of their timeout
2252 * setting.
2253 * - force == false: Only instances that have a timeout will be removed.
2254 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002255void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2256 struct hci_request *req, u8 instance,
2257 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002258{
2259 struct adv_info *adv_instance, *n, *next_instance = NULL;
2260 int err;
2261 u8 rem_inst;
2262
2263 /* Cancel any timeout concerning the removed instance(s). */
2264 if (!instance || hdev->cur_adv_instance == instance)
2265 cancel_adv_timeout(hdev);
2266
2267 /* Get the next instance to advertise BEFORE we remove
2268 * the current one. This can be the same instance again
2269 * if there is only one instance.
2270 */
2271 if (instance && hdev->cur_adv_instance == instance)
2272 next_instance = hci_get_next_instance(hdev, instance);
2273
2274 if (instance == 0x00) {
2275 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2276 list) {
2277 if (!(force || adv_instance->timeout))
2278 continue;
2279
2280 rem_inst = adv_instance->instance;
2281 err = hci_remove_adv_instance(hdev, rem_inst);
2282 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002283 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002284 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002285 } else {
2286 adv_instance = hci_find_adv_instance(hdev, instance);
2287
2288 if (force || (adv_instance && adv_instance->timeout &&
2289 !adv_instance->remaining_time)) {
2290 /* Don't advertise a removed instance. */
2291 if (next_instance &&
2292 next_instance->instance == instance)
2293 next_instance = NULL;
2294
2295 err = hci_remove_adv_instance(hdev, instance);
2296 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002297 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002298 }
2299 }
2300
Johan Hedbergf2252572015-11-18 12:49:20 +02002301 if (!req || !hdev_is_powered(hdev) ||
2302 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2303 return;
2304
Daniel Winkler37adf702020-07-14 14:16:00 -07002305 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002306 __hci_req_schedule_adv_instance(req, next_instance->instance,
2307 false);
2308}
2309
Johan Hedberg0857dd32014-12-19 13:40:20 +02002310static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2311{
2312 struct hci_dev *hdev = req->hdev;
2313
2314 /* If we're advertising or initiating an LE connection we can't
2315 * go ahead and change the random address at this time. This is
2316 * because the eventual initiator address used for the
2317 * subsequently created connection will be undefined (some
2318 * controllers use the new address and others the one we had
2319 * when the operation started).
2320 *
2321 * In this kind of scenario skip the update and let the random
2322 * address be updated at the next cycle.
2323 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002324 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002325 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002326 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002327 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002328 return;
2329 }
2330
2331 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2332}
2333
2334int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002335 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002336{
2337 struct hci_dev *hdev = req->hdev;
2338 int err;
2339
2340 /* If privacy is enabled use a resolvable private address. If
2341 * current RPA has expired or there is something else than
2342 * the current RPA in use, then generate a new one.
2343 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002344 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002345 int to;
2346
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302347		/* If the controller supports LL Privacy, use own address type
2348		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
2349		 */
2350 if (use_ll_privacy(hdev))
2351 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2352 else
2353 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002354
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002355 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002356 !bacmp(&hdev->random_addr, &hdev->rpa))
2357 return 0;
2358
2359 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2360 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002361 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002362 return err;
2363 }
2364
2365 set_random_addr(req, &hdev->rpa);
2366
2367 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2368 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2369
2370 return 0;
2371 }
2372
2373 /* In case of required privacy without resolvable private address,
2374	 * use a non-resolvable private address. This is useful for active
2375 * scanning and non-connectable advertising.
2376 */
2377 if (require_privacy) {
2378 bdaddr_t nrpa;
2379
2380 while (true) {
2381 /* The non-resolvable private address is generated
2382 * from random six bytes with the two most significant
2383 * bits cleared.
2384 */
2385 get_random_bytes(&nrpa, 6);
2386 nrpa.b[5] &= 0x3f;
2387
2388 /* The non-resolvable private address shall not be
2389 * equal to the public address.
2390 */
2391 if (bacmp(&hdev->bdaddr, &nrpa))
2392 break;
2393 }
2394
2395 *own_addr_type = ADDR_LE_DEV_RANDOM;
2396 set_random_addr(req, &nrpa);
2397 return 0;
2398 }
2399
2400 /* If forcing static address is in use or there is no public
2401 * address use the static address as random address (but skip
2402 * the HCI command if the current random address is already the
2403	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002404 *
2405 * In case BR/EDR has been disabled on a dual-mode controller
2406 * and a static address has been configured, then use that
2407 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002408 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002409 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002410 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002411 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002412 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002413 *own_addr_type = ADDR_LE_DEV_RANDOM;
2414 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2415 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2416 &hdev->static_addr);
2417 return 0;
2418 }
2419
2420 /* Neither privacy nor static address is being used so use a
2421 * public address.
2422 */
2423 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2424
2425 return 0;
2426}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002427
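/* Returns true if any BR/EDR device on the whitelist is currently
 * disconnected, in which case page scan needs to stay enabled so the
 * device can reconnect on its own.
 */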
Johan Hedberg405a2612014-12-19 23:18:22 +02002428static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2429{
2430 struct bdaddr_list *b;
2431
2432 list_for_each_entry(b, &hdev->whitelist, list) {
2433 struct hci_conn *conn;
2434
2435 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2436 if (!conn)
2437 return true;
2438
2439 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2440 return true;
2441 }
2442
2443 return false;
2444}
2445
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002446void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002447{
2448 struct hci_dev *hdev = req->hdev;
2449 u8 scan;
2450
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002451 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002452 return;
2453
2454 if (!hdev_is_powered(hdev))
2455 return;
2456
2457 if (mgmt_powering_down(hdev))
2458 return;
2459
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002460 if (hdev->scanning_paused)
2461 return;
2462
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002463 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002464 disconnected_whitelist_entries(hdev))
2465 scan = SCAN_PAGE;
2466 else
2467 scan = SCAN_DISABLED;
2468
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002469 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002470 scan |= SCAN_INQUIRY;
2471
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002472 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2473 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2474 return;
2475
Johan Hedberg405a2612014-12-19 23:18:22 +02002476 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2477}
2478
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002479static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002480{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002481 hci_dev_lock(req->hdev);
2482 __hci_req_update_scan(req);
2483 hci_dev_unlock(req->hdev);
2484 return 0;
2485}
Johan Hedberg405a2612014-12-19 23:18:22 +02002486
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002487static void scan_update_work(struct work_struct *work)
2488{
2489 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2490
2491 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002492}
2493
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002494static int connectable_update(struct hci_request *req, unsigned long opt)
2495{
2496 struct hci_dev *hdev = req->hdev;
2497
2498 hci_dev_lock(hdev);
2499
2500 __hci_req_update_scan(req);
2501
2502 /* If BR/EDR is not enabled and we disable advertising as a
2503 * by-product of disabling connectable, we need to update the
2504 * advertising flags.
2505 */
2506 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002507 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002508
2509 /* Update the advertising parameters if necessary */
2510 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302511 !list_empty(&hdev->adv_instances)) {
2512 if (ext_adv_capable(hdev))
2513 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2514 else
2515 __hci_req_enable_advertising(req);
2516 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002517
2518 __hci_update_background_scan(req);
2519
2520 hci_dev_unlock(hdev);
2521
2522 return 0;
2523}
2524
2525static void connectable_update_work(struct work_struct *work)
2526{
2527 struct hci_dev *hdev = container_of(work, struct hci_dev,
2528 connectable_update);
2529 u8 status;
2530
2531 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2532 mgmt_set_connectable_complete(hdev, status);
2533}
2534
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002535static u8 get_service_classes(struct hci_dev *hdev)
2536{
2537 struct bt_uuid *uuid;
2538 u8 val = 0;
2539
2540 list_for_each_entry(uuid, &hdev->uuids, list)
2541 val |= uuid->svc_hint;
2542
2543 return val;
2544}
2545
2546void __hci_req_update_class(struct hci_request *req)
2547{
2548 struct hci_dev *hdev = req->hdev;
2549 u8 cod[3];
2550
2551 BT_DBG("%s", hdev->name);
2552
2553 if (!hdev_is_powered(hdev))
2554 return;
2555
2556 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2557 return;
2558
2559 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2560 return;
2561
2562 cod[0] = hdev->minor_class;
2563 cod[1] = hdev->major_class;
2564 cod[2] = get_service_classes(hdev);
2565
2566 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2567 cod[1] |= 0x20;
2568
2569 if (memcmp(cod, hdev->dev_class, 3) == 0)
2570 return;
2571
2572 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2573}
2574
Johan Hedbergaed1a882015-11-22 17:24:44 +03002575static void write_iac(struct hci_request *req)
2576{
2577 struct hci_dev *hdev = req->hdev;
2578 struct hci_cp_write_current_iac_lap cp;
2579
2580 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2581 return;
2582
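	/* IAC LAP octets are written LSB first: 0x9e8b00 is the Limited
	 * Inquiry Access Code and 0x9e8b33 the General one.
	 */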
2583 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2584 /* Limited discoverable mode */
2585 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2586 cp.iac_lap[0] = 0x00; /* LIAC */
2587 cp.iac_lap[1] = 0x8b;
2588 cp.iac_lap[2] = 0x9e;
2589 cp.iac_lap[3] = 0x33; /* GIAC */
2590 cp.iac_lap[4] = 0x8b;
2591 cp.iac_lap[5] = 0x9e;
2592 } else {
2593 /* General discoverable mode */
2594 cp.num_iac = 1;
2595 cp.iac_lap[0] = 0x33; /* GIAC */
2596 cp.iac_lap[1] = 0x8b;
2597 cp.iac_lap[2] = 0x9e;
2598 }
2599
2600 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2601 (cp.num_iac * 3) + 1, &cp);
2602}
2603
2604static int discoverable_update(struct hci_request *req, unsigned long opt)
2605{
2606 struct hci_dev *hdev = req->hdev;
2607
2608 hci_dev_lock(hdev);
2609
2610 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2611 write_iac(req);
2612 __hci_req_update_scan(req);
2613 __hci_req_update_class(req);
2614 }
2615
2616 /* Advertising instances don't use the global discoverable setting, so
2617 * only update AD if advertising was enabled using Set Advertising.
2618 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002619 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002620 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002621
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002622 /* Discoverable mode affects the local advertising
2623 * address in limited privacy mode.
2624 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302625 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2626 if (ext_adv_capable(hdev))
2627 __hci_req_start_ext_adv(req, 0x00);
2628 else
2629 __hci_req_enable_advertising(req);
2630 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002631 }
2632
Johan Hedbergaed1a882015-11-22 17:24:44 +03002633 hci_dev_unlock(hdev);
2634
2635 return 0;
2636}
2637
2638static void discoverable_update_work(struct work_struct *work)
2639{
2640 struct hci_dev *hdev = container_of(work, struct hci_dev,
2641 discoverable_update);
2642 u8 status;
2643
2644 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2645 mgmt_set_discoverable_complete(hdev, status);
2646}
2647
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002648void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2649 u8 reason)
2650{
2651 switch (conn->state) {
2652 case BT_CONNECTED:
2653 case BT_CONFIG:
2654 if (conn->type == AMP_LINK) {
2655 struct hci_cp_disconn_phy_link cp;
2656
2657 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2658 cp.reason = reason;
2659 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2660 &cp);
2661 } else {
2662 struct hci_cp_disconnect dc;
2663
2664 dc.handle = cpu_to_le16(conn->handle);
2665 dc.reason = reason;
2666 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2667 }
2668
2669 conn->state = BT_DISCONN;
2670
2671 break;
2672 case BT_CONNECT:
2673 if (conn->type == LE_LINK) {
2674 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2675 break;
2676 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2677 0, NULL);
2678 } else if (conn->type == ACL_LINK) {
2679 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2680 break;
2681 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2682 6, &conn->dst);
2683 }
2684 break;
2685 case BT_CONNECT2:
2686 if (conn->type == ACL_LINK) {
2687 struct hci_cp_reject_conn_req rej;
2688
2689 bacpy(&rej.bdaddr, &conn->dst);
2690 rej.reason = reason;
2691
2692 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2693 sizeof(rej), &rej);
2694 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2695 struct hci_cp_reject_sync_conn_req rej;
2696
2697 bacpy(&rej.bdaddr, &conn->dst);
2698
2699 /* SCO rejection has its own limited set of
2700 * allowed error values (0x0D-0x0F) which isn't
2701 * compatible with most values passed to this
2702			 * function. To be safe, hard-code one of the
2703 * values that's suitable for SCO.
2704 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002705 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002706
2707 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2708 sizeof(rej), &rej);
2709 }
2710 break;
2711 default:
2712 conn->state = BT_CLOSED;
2713 break;
2714 }
2715}
2716
2717static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2718{
2719 if (status)
2720 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2721}
2722
2723int hci_abort_conn(struct hci_conn *conn, u8 reason)
2724{
2725 struct hci_request req;
2726 int err;
2727
2728 hci_req_init(&req, conn->hdev);
2729
2730 __hci_abort_conn(&req, conn, reason);
2731
2732 err = hci_req_run(&req, abort_conn_complete);
2733 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002734 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002735 return err;
2736 }
2737
2738 return 0;
2739}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002740
Johan Hedberga1d01db2015-11-11 08:11:25 +02002741static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002742{
2743 hci_dev_lock(req->hdev);
2744 __hci_update_background_scan(req);
2745 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002746 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002747}
2748
2749static void bg_scan_update(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev,
2752 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002753 struct hci_conn *conn;
2754 u8 status;
2755 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002756
Johan Hedberg84235d22015-11-11 08:11:20 +02002757 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2758 if (!err)
2759 return;
2760
2761 hci_dev_lock(hdev);
2762
2763 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2764 if (conn)
2765 hci_le_conn_failed(conn, status);
2766
2767 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002768}
2769
Johan Hedberga1d01db2015-11-11 08:11:25 +02002770static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002771{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302772 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002773 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002774}
2775
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002776static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2777{
2778 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002779 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2780 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002781 struct hci_cp_inquiry cp;
2782
2783 BT_DBG("%s", req->hdev->name);
2784
2785 hci_dev_lock(req->hdev);
2786 hci_inquiry_cache_flush(req->hdev);
2787 hci_dev_unlock(req->hdev);
2788
2789 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002790
2791 if (req->hdev->discovery.limited)
2792 memcpy(&cp.lap, liac, sizeof(cp.lap));
2793 else
2794 memcpy(&cp.lap, giac, sizeof(cp.lap));
2795
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002796 cp.length = length;
2797
2798 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2799
2800 return 0;
2801}
2802
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002803static void le_scan_disable_work(struct work_struct *work)
2804{
2805 struct hci_dev *hdev = container_of(work, struct hci_dev,
2806 le_scan_disable.work);
2807 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002808
2809 BT_DBG("%s", hdev->name);
2810
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002811 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002812 return;
2813
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002814 cancel_delayed_work(&hdev->le_scan_restart);
2815
2816 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2817 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002818 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2819 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002820 return;
2821 }
2822
2823 hdev->discovery.scan_start = 0;
2824
2825 /* If we were running LE only scan, change discovery state. If
2826 * we were running both LE and BR/EDR inquiry simultaneously,
2827 * and BR/EDR inquiry is already finished, stop discovery,
2828 * otherwise BR/EDR inquiry will stop discovery when finished.
2829	 * If we are resolving a remote device name, do not change
2830	 * the discovery state.
2831 */
2832
2833 if (hdev->discovery.type == DISCOV_TYPE_LE)
2834 goto discov_stopped;
2835
2836 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2837 return;
2838
2839 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2840 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2841 hdev->discovery.state != DISCOVERY_RESOLVING)
2842 goto discov_stopped;
2843
2844 return;
2845 }
2846
2847 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2848 HCI_CMD_TIMEOUT, &status);
2849 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002850 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002851 goto discov_stopped;
2852 }
2853
2854 return;
2855
2856discov_stopped:
2857 hci_dev_lock(hdev);
2858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2859 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002860}
2861
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002862static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002863{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002864 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002865
2866 /* If controller is not scanning we are done. */
2867 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2868 return 0;
2869
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002870 if (hdev->scanning_paused) {
2871 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2872 return 0;
2873 }
2874
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302875 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002876
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302877 if (use_ext_scan(hdev)) {
2878 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2879
2880 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2881 ext_enable_cp.enable = LE_SCAN_ENABLE;
2882 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2883
2884 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2885 sizeof(ext_enable_cp), &ext_enable_cp);
2886 } else {
2887 struct hci_cp_le_set_scan_enable cp;
2888
2889 memset(&cp, 0, sizeof(cp));
2890 cp.enable = LE_SCAN_ENABLE;
2891 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2892 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2893 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002894
2895 return 0;
2896}
2897
2898static void le_scan_restart_work(struct work_struct *work)
2899{
2900 struct hci_dev *hdev = container_of(work, struct hci_dev,
2901 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002902 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002903 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002904
2905 BT_DBG("%s", hdev->name);
2906
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002907 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002908 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002909 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2910 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002911 return;
2912 }
2913
2914 hci_dev_lock(hdev);
2915
2916 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2917 !hdev->discovery.scan_start)
2918 goto unlock;
2919
2920	/* When the scan was started, hdev->le_scan_disable was queued to
2921	 * run 'duration' after scan_start. During the scan restart that
2922	 * work was canceled, so queue it again with the proper remaining
2923	 * timeout to make sure the scan does not run indefinitely.
2924 */
2925 duration = hdev->discovery.scan_duration;
2926 scan_start = hdev->discovery.scan_start;
2927 now = jiffies;
2928 if (now - scan_start <= duration) {
2929 int elapsed;
2930
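		/* jiffies may have wrapped between scan_start and now; the
		 * else branch below accounts for the wrapped counter.
		 */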
2931 if (now >= scan_start)
2932 elapsed = now - scan_start;
2933 else
2934 elapsed = ULONG_MAX - scan_start + now;
2935
2936 timeout = duration - elapsed;
2937 } else {
2938 timeout = 0;
2939 }
2940
2941 queue_delayed_work(hdev->req_workqueue,
2942 &hdev->le_scan_disable, timeout);
2943
2944unlock:
2945 hci_dev_unlock(hdev);
2946}
2947
Johan Hedberge68f0722015-11-11 08:30:30 +02002948static int active_scan(struct hci_request *req, unsigned long opt)
2949{
2950 uint16_t interval = opt;
2951 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002952 u8 own_addr_type;
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002953 /* White list is not used for discovery */
2954 u8 filter_policy = 0x00;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302955 /* Discovery doesn't require controller address resolution */
2956 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02002957 int err;
2958
2959 BT_DBG("%s", hdev->name);
2960
Johan Hedberge68f0722015-11-11 08:30:30 +02002961 /* If controller is scanning, it means the background scanning is
2962 * running. Thus, we should temporarily stop it in order to set the
2963 * discovery scanning parameters.
2964 */
2965 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302966 hci_req_add_le_scan_disable(req, false);
Johan Hedberge68f0722015-11-11 08:30:30 +02002967
2968 /* All active scans will be done with either a resolvable private
2969 * address (when privacy feature has been enabled) or non-resolvable
2970 * private address.
2971 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002972 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2973 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002974 if (err < 0)
2975 own_addr_type = ADDR_LE_DEV_PUBLIC;
2976
Alain Michaudd4edda02020-06-29 17:04:15 +00002977 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2978 hdev->le_scan_window_discovery, own_addr_type,
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302979 filter_policy, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02002980 return 0;
2981}
2982
2983static int interleaved_discov(struct hci_request *req, unsigned long opt)
2984{
2985 int err;
2986
2987 BT_DBG("%s", req->hdev->name);
2988
	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

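	/* Arm the delayed work that disables scanning once the discovery
	 * timeout expires.
	 */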
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

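	/* For BR/EDR discovery, also cancel a remote name request that is
	 * still pending as part of name resolution.
	 */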
	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

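	/* Enable SSP (and Secure Connections when supported) on the
	 * controller if the host has it enabled but the controller has
	 * not yet been configured for it.
	 */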
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

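	/* Only cancel the advertising instance expiry work if it is
	 * actually pending, and clear the recorded timeout.
	 */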
	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}