/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
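
/* Illustrative usage sketch (not part of this file): a caller builds a
 * request on the stack, queues one or more commands and then runs it.
 * The opcode is just an example of a parameter-less command and
 * some_complete_cb is a hypothetical hci_req_complete_t callback.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, some_complete_cb);
 */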

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
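
/* Minimal sketch of a synchronous command call, assuming the caller
 * already serializes access via hci_req_sync_lock(hdev); the opcode
 * and timeout below are examples only.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */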

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
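
/* Sketch of the typical calling pattern, assuming a request-builder
 * callback with the required signature (le_scan_disable_req below is
 * hypothetical):
 *
 *	static int le_scan_disable_req(struct hci_request *req,
 *				       unsigned long opt)
 *	{
 *		hci_req_add_le_scan_disable(req);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, le_scan_disable_req, 0,
 *			       HCI_CMD_TIMEOUT, &status);
 */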

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
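
/* Worked conversion for the values above: page scan timing is expressed
 * in units of 0.625 ms, so the fast interval 0x0100 = 256 * 0.625 ms =
 * 160 ms, the default interval 0x0800 = 2048 * 0.625 ms = 1.28 s, and
 * the window 0x0012 = 18 * 0.625 ms = 11.25 ms.
 */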

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller to hold hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure proper values are set for the RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
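
/* Example of the bytes produced above for a single 16-bit UUID 0x110b
 * (Audio Sink), given EIR_UUID16_ALL == 0x03:
 *
 *	03 03 0b 11
 *
 * i.e. field length 3 (type byte plus two UUID bytes), the
 * EIR_UUID16_ALL type, and the UUID in little-endian byte order.
 */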

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}
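
/* The interval and window passed above are in units of 0.625 ms; with
 * the usual defaults of hdev->le_scan_interval = 0x0060 and
 * hdev->le_scan_window = 0x0030 the controller scans for
 * 48 * 0.625 ms = 30 ms out of every 96 * 0.625 ms = 60 ms.
 */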

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
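
/* Summary of the decision above (assuming the two privacy flags and the
 * instance flags are the only inputs):
 *
 *	HCI_PRIVACY  HCI_LIMITED_PRIVACY  discoverable && bondable  RPA?
 *	    off               -                     -               no
 *	    on               off                    -               yes
 *	    on               on                    yes              no
 *	    on               on                    no               yes
 */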

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
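
/* The bit numbers in the comments above index the LE supported states
 * mask reported by the controller; bit N generally maps to
 * le_states[N / 8] & (1 << (N % 8)). For example, bit 20 corresponds
 * to le_states[2] & 0x10 and bit 38 to le_states[4] & 0x40, matching
 * the checks in the function.
 */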

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
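
/* Note on the NRPA generation above: bdaddr_t stores the address
 * little-endian, so b[5] is the most significant byte and masking it
 * with 0x3f clears the two most significant bits of the address, as a
 * non-resolvable private address requires.
 */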
1552
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301553void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1554{
1555 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1556}
1557
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301558int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301559{
1560 struct hci_cp_le_set_ext_adv_params cp;
1561 struct hci_dev *hdev = req->hdev;
1562 bool connectable;
1563 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301564 bdaddr_t random_addr;
1565 u8 own_addr_type;
1566 int err;
1567 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301568 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301569 /* In ext adv set param interval is 3 octets */
1570 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1571
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301572 if (instance > 0) {
1573 adv_instance = hci_find_adv_instance(hdev, instance);
1574 if (!adv_instance)
1575 return -EINVAL;
1576 } else {
1577 adv_instance = NULL;
1578 }
1579
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301580 flags = get_adv_instance_flags(hdev, instance);
1581
1582 /* If the "connectable" instance flag was not set, then choose between
1583 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1584 */
1585 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1586 mgmt_get_connectable(hdev);
1587
Colin Ian King75edd1f2018-11-09 13:27:36 +00001588 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301589 return -EPERM;
1590
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301591 /* Set require_privacy to true only when non-connectable
1592 * advertising is used. In that case it is fine to use a
1593 * non-resolvable private address.
1594 */
1595 err = hci_get_random_address(hdev, !connectable,
1596 adv_use_rpa(hdev, flags), adv_instance,
1597 &own_addr_type, &random_addr);
1598 if (err < 0)
1599 return err;
1600
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301601 memset(&cp, 0, sizeof(cp));
1602
1603 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1604 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1605
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301606 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1607
1608 if (connectable) {
1609 if (secondary_adv)
1610 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1611 else
1612 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1613 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1614 if (secondary_adv)
1615 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1616 else
1617 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1618 } else {
1619 if (secondary_adv)
1620 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1621 else
1622 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1623 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301624
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301625 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301626 cp.channel_map = hdev->le_adv_channel_map;
1627 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001628 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301629
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301630 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1631 cp.primary_phy = HCI_ADV_PHY_1M;
1632 cp.secondary_phy = HCI_ADV_PHY_2M;
1633 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1634 cp.primary_phy = HCI_ADV_PHY_CODED;
1635 cp.secondary_phy = HCI_ADV_PHY_CODED;
1636 } else {
1637 /* In all other cases use 1M */
1638 cp.primary_phy = HCI_ADV_PHY_1M;
1639 cp.secondary_phy = HCI_ADV_PHY_1M;
1640 }
1641
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301642 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1643
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301644 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1645 bacmp(&random_addr, BDADDR_ANY)) {
1646 struct hci_cp_le_set_adv_set_rand_addr cp;
1647
1648		/* Check if the random address needs to be updated */
1649 if (adv_instance) {
1650 if (!bacmp(&random_addr, &adv_instance->random_addr))
1651 return 0;
1652 } else {
1653 if (!bacmp(&random_addr, &hdev->random_addr))
1654 return 0;
1655 }
1656
1657 memset(&cp, 0, sizeof(cp));
1658
1659 cp.handle = 0;
1660 bacpy(&cp.bdaddr, &random_addr);
1661
1662 hci_req_add(req,
1663 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1664 sizeof(cp), &cp);
1665 }
1666
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301667 return 0;
1668}
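
/* A minimal sketch of the event-properties selection above;
 * example_pick_evt_properties is a hypothetical helper, not an
 * existing kernel function. Connectable wins over scannable, and
 * legacy PDU types are used unless a secondary PHY was requested.
 * A caller would still wrap the result in cpu_to_le16().
 */
static u16 example_pick_evt_properties(bool connectable, bool scannable,
				       bool secondary_adv)
{
	if (connectable)
		return secondary_adv ? LE_EXT_ADV_CONN_IND :
				       LE_LEGACY_ADV_IND;

	if (scannable)
		return secondary_adv ? LE_EXT_ADV_SCAN_IND :
				       LE_LEGACY_ADV_SCAN_IND;

	return secondary_adv ? LE_EXT_ADV_NON_CONN_IND :
			       LE_LEGACY_NONCONN_IND;
}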
1669
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001670int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301671{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001672 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301673 struct hci_cp_le_set_ext_adv_enable *cp;
1674 struct hci_cp_ext_adv_set *adv_set;
1675 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001676 struct adv_info *adv_instance;
1677
1678 if (instance > 0) {
1679 adv_instance = hci_find_adv_instance(hdev, instance);
1680 if (!adv_instance)
1681 return -EINVAL;
1682 } else {
1683 adv_instance = NULL;
1684 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301685
1686 cp = (void *) data;
1687 adv_set = (void *) cp->data;
1688
1689 memset(cp, 0, sizeof(*cp));
1690
1691 cp->enable = 0x01;
1692 cp->num_of_sets = 0x01;
1693
1694 memset(adv_set, 0, sizeof(*adv_set));
1695
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001696 adv_set->handle = instance;
1697
1698	/* Set duration per instance since the controller is responsible for
1699 * scheduling it.
1700 */
1701 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001702 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001703
1704 /* Time = N * 10 ms */
1705 adv_set->duration = cpu_to_le16(duration / 10);
1706 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301707
1708 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1709 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1710 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001711
1712 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301713}
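
/* Worked example of the duration conversion above, assuming an
 * instance whose timeout is 5 seconds: duration = 5 * MSEC_PER_SEC =
 * 5000 ms, and adv_set->duration = 5000 / 10 = 500, since the
 * controller counts advertising duration in units of 10 ms
 * (Time = N * 10 ms).
 */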
1714
1715int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1716{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301717 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301718 int err;
1719
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301720 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1721 __hci_req_disable_advertising(req);
1722
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301723 err = __hci_req_setup_ext_adv_instance(req, instance);
1724 if (err < 0)
1725 return err;
1726
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301727 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001728 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301729
1730 return 0;
1731}
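
/* Usage sketch: example_start_ext_adv is a hypothetical caller (it
 * assumes an extended-advertising capable controller and elides
 * locking and power checks). It illustrates that the __hci_req_*
 * builders only queue HCI commands; nothing reaches the controller
 * until the request is run.
 */
static int example_start_ext_adv(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);

	err = __hci_req_start_ext_adv(&req, instance);
	if (err < 0)
		return err;

	/* Splice the queued commands onto hdev->cmd_q and kick the
	 * command worker; no completion callback is requested here.
	 */
	return hci_req_run(&req, NULL);
}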
1732
Johan Hedbergf2252572015-11-18 12:49:20 +02001733int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1734 bool force)
1735{
1736 struct hci_dev *hdev = req->hdev;
1737 struct adv_info *adv_instance = NULL;
1738 u16 timeout;
1739
1740 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001741 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001742 return -EPERM;
1743
1744 if (hdev->adv_instance_timeout)
1745 return -EBUSY;
1746
1747 adv_instance = hci_find_adv_instance(hdev, instance);
1748 if (!adv_instance)
1749 return -ENOENT;
1750
1751 /* A zero timeout means unlimited advertising. As long as there is
1752	 * only one instance, the duration should be ignored. We still set a
1753	 * timeout in case further instances are added later on.
1754 *
1755 * If the remaining lifetime of the instance is more than the duration
1756 * then the timeout corresponds to the duration, otherwise it will be
1757 * reduced to the remaining instance lifetime.
1758 */
1759 if (adv_instance->timeout == 0 ||
1760 adv_instance->duration <= adv_instance->remaining_time)
1761 timeout = adv_instance->duration;
1762 else
1763 timeout = adv_instance->remaining_time;
1764
1765 /* The remaining time is being reduced unless the instance is being
1766 * advertised without time limit.
1767 */
1768 if (adv_instance->timeout)
1769 adv_instance->remaining_time =
1770 adv_instance->remaining_time - timeout;
1771
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001772 /* Only use work for scheduling instances with legacy advertising */
1773 if (!ext_adv_capable(hdev)) {
1774 hdev->adv_instance_timeout = timeout;
1775 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02001776 &hdev->adv_instance_expire,
1777 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001778 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001779
1780 /* If we're just re-scheduling the same instance again then do not
1781 * execute any HCI commands. This happens when a single instance is
1782 * being advertised.
1783 */
1784 if (!force && hdev->cur_adv_instance == instance &&
1785 hci_dev_test_flag(hdev, HCI_LE_ADV))
1786 return 0;
1787
1788 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301789 if (ext_adv_capable(hdev)) {
1790 __hci_req_start_ext_adv(req, instance);
1791 } else {
1792 __hci_req_update_adv_data(req, instance);
1793 __hci_req_update_scan_rsp_data(req, instance);
1794 __hci_req_enable_advertising(req);
1795 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001796
1797 return 0;
1798}
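
/* Worked example of the timeout selection above: with a duration of
 * 10 s and a remaining_time of 25 s, the instance is first scheduled
 * for 10 s (remaining_time drops to 15 s), then again for 10 s
 * (remaining_time drops to 5 s); on the third pass the remaining 5 s
 * is shorter than the duration, so the timeout shrinks to 5 s and the
 * instance expires.
 */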
1799
1800static void cancel_adv_timeout(struct hci_dev *hdev)
1801{
1802 if (hdev->adv_instance_timeout) {
1803 hdev->adv_instance_timeout = 0;
1804 cancel_delayed_work(&hdev->adv_instance_expire);
1805 }
1806}
1807
1808/* For a single instance:
1809 * - force == true: The instance will be removed even when its remaining
1810 * lifetime is not zero.
1811 * - force == false: The instance will be deactivated but kept stored unless
1812 * the remaining lifetime is zero.
1813 *
1814 * For instance == 0x00:
1815 * - force == true: All instances will be removed regardless of their timeout
1816 * setting.
1817 * - force == false: Only instances that have a timeout will be removed.
1818 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001819void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1820 struct hci_request *req, u8 instance,
1821 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02001822{
1823 struct adv_info *adv_instance, *n, *next_instance = NULL;
1824 int err;
1825 u8 rem_inst;
1826
1827 /* Cancel any timeout concerning the removed instance(s). */
1828 if (!instance || hdev->cur_adv_instance == instance)
1829 cancel_adv_timeout(hdev);
1830
1831 /* Get the next instance to advertise BEFORE we remove
1832 * the current one. This can be the same instance again
1833 * if there is only one instance.
1834 */
1835 if (instance && hdev->cur_adv_instance == instance)
1836 next_instance = hci_get_next_instance(hdev, instance);
1837
1838 if (instance == 0x00) {
1839 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1840 list) {
1841 if (!(force || adv_instance->timeout))
1842 continue;
1843
1844 rem_inst = adv_instance->instance;
1845 err = hci_remove_adv_instance(hdev, rem_inst);
1846 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001847 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02001848 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001849 } else {
1850 adv_instance = hci_find_adv_instance(hdev, instance);
1851
1852 if (force || (adv_instance && adv_instance->timeout &&
1853 !adv_instance->remaining_time)) {
1854 /* Don't advertise a removed instance. */
1855 if (next_instance &&
1856 next_instance->instance == instance)
1857 next_instance = NULL;
1858
1859 err = hci_remove_adv_instance(hdev, instance);
1860 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001861 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001862 }
1863 }
1864
Johan Hedbergf2252572015-11-18 12:49:20 +02001865 if (!req || !hdev_is_powered(hdev) ||
1866 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1867 return;
1868
1869 if (next_instance)
1870 __hci_req_schedule_adv_instance(req, next_instance->instance,
1871 false);
1872}
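
/* Hypothetical usage note: a call such as
 *
 *	hci_req_clear_adv_instance(hdev, sk, req, 0x00, false);
 *
 * removes only the instances that have a timeout set and keeps
 * unlimited instances stored. When a specific instance is passed and
 * it is the one currently being advertised, the next stored instance
 * is scheduled on the same request instead.
 */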
1873
Johan Hedberg0857dd32014-12-19 13:40:20 +02001874static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1875{
1876 struct hci_dev *hdev = req->hdev;
1877
1878 /* If we're advertising or initiating an LE connection we can't
1879 * go ahead and change the random address at this time. This is
1880 * because the eventual initiator address used for the
1881 * subsequently created connection will be undefined (some
1882 * controllers use the new address and others the one we had
1883 * when the operation started).
1884 *
1885 * In this kind of scenario skip the update and let the random
1886 * address be updated at the next cycle.
1887 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001888 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02001889 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001890 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001891 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001892 return;
1893 }
1894
1895 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1896}
1897
1898int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001899 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001900{
1901 struct hci_dev *hdev = req->hdev;
1902 int err;
1903
1904 /* If privacy is enabled use a resolvable private address. If
1905 * current RPA has expired or there is something else than
1906 * the current RPA in use, then generate a new one.
1907 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001908 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001909 int to;
1910
1911 *own_addr_type = ADDR_LE_DEV_RANDOM;
1912
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001913 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001914 !bacmp(&hdev->random_addr, &hdev->rpa))
1915 return 0;
1916
1917 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1918 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001919 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001920 return err;
1921 }
1922
1923 set_random_addr(req, &hdev->rpa);
1924
1925 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1926 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1927
1928 return 0;
1929 }
1930
1931 /* In case of required privacy without resolvable private address,
1932	 * use a non-resolvable private address. This is useful for active
1933 * scanning and non-connectable advertising.
1934 */
1935 if (require_privacy) {
1936 bdaddr_t nrpa;
1937
1938 while (true) {
1939 /* The non-resolvable private address is generated
1940	 * from six random bytes with the two most significant
1941 * bits cleared.
1942 */
1943 get_random_bytes(&nrpa, 6);
1944 nrpa.b[5] &= 0x3f;
1945
1946 /* The non-resolvable private address shall not be
1947 * equal to the public address.
1948 */
1949 if (bacmp(&hdev->bdaddr, &nrpa))
1950 break;
1951 }
1952
1953 *own_addr_type = ADDR_LE_DEV_RANDOM;
1954 set_random_addr(req, &nrpa);
1955 return 0;
1956 }
1957
1958 /* If forcing static address is in use or there is no public
1959 * address use the static address as random address (but skip
1960 * the HCI command if the current random address is already the
1961	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001962 *
1963 * In case BR/EDR has been disabled on a dual-mode controller
1964 * and a static address has been configured, then use that
1965 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001966 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001967 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001968 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001969 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001970 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001971 *own_addr_type = ADDR_LE_DEV_RANDOM;
1972 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1973 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1974 &hdev->static_addr);
1975 return 0;
1976 }
1977
1978 /* Neither privacy nor static address is being used so use a
1979 * public address.
1980 */
1981 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1982
1983 return 0;
1984}
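
/* Summary of the own-address policy implemented above, in decreasing
 * order of precedence:
 *
 * 1. privacy enabled (use_rpa)  -> resolvable private address
 * 2. require_privacy            -> non-resolvable private address
 * 3. forced static address, no
 *    public address, or BR/EDR
 *    disabled with a static
 *    address configured         -> static random address
 * 4. otherwise                  -> public address
 */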
Johan Hedberg2cf22212014-12-19 22:26:00 +02001985
Johan Hedberg405a2612014-12-19 23:18:22 +02001986static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1987{
1988 struct bdaddr_list *b;
1989
1990 list_for_each_entry(b, &hdev->whitelist, list) {
1991 struct hci_conn *conn;
1992
1993 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1994 if (!conn)
1995 return true;
1996
1997 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1998 return true;
1999 }
2000
2001 return false;
2002}
2003
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002004void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002005{
2006 struct hci_dev *hdev = req->hdev;
2007 u8 scan;
2008
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002009 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002010 return;
2011
2012 if (!hdev_is_powered(hdev))
2013 return;
2014
2015 if (mgmt_powering_down(hdev))
2016 return;
2017
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002018 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002019 disconnected_whitelist_entries(hdev))
2020 scan = SCAN_PAGE;
2021 else
2022 scan = SCAN_DISABLED;
2023
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002024 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002025 scan |= SCAN_INQUIRY;
2026
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002027 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2028 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2029 return;
2030
Johan Hedberg405a2612014-12-19 23:18:22 +02002031 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2032}
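
/* The scan value written above is a bitmask: SCAN_PAGE enables page
 * scan (the controller is connectable), SCAN_INQUIRY enables inquiry
 * scan (the controller is discoverable), and SCAN_DISABLED clears
 * both. A connectable and discoverable device therefore writes
 * SCAN_PAGE | SCAN_INQUIRY.
 */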
2033
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002034static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002035{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002036 hci_dev_lock(req->hdev);
2037 __hci_req_update_scan(req);
2038 hci_dev_unlock(req->hdev);
2039 return 0;
2040}
Johan Hedberg405a2612014-12-19 23:18:22 +02002041
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002042static void scan_update_work(struct work_struct *work)
2043{
2044 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2045
2046 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002047}
2048
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002049static int connectable_update(struct hci_request *req, unsigned long opt)
2050{
2051 struct hci_dev *hdev = req->hdev;
2052
2053 hci_dev_lock(hdev);
2054
2055 __hci_req_update_scan(req);
2056
2057 /* If BR/EDR is not enabled and we disable advertising as a
2058 * by-product of disabling connectable, we need to update the
2059 * advertising flags.
2060 */
2061 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002062 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002063
2064 /* Update the advertising parameters if necessary */
2065 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302066 !list_empty(&hdev->adv_instances)) {
2067 if (ext_adv_capable(hdev))
2068 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2069 else
2070 __hci_req_enable_advertising(req);
2071 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002072
2073 __hci_update_background_scan(req);
2074
2075 hci_dev_unlock(hdev);
2076
2077 return 0;
2078}
2079
2080static void connectable_update_work(struct work_struct *work)
2081{
2082 struct hci_dev *hdev = container_of(work, struct hci_dev,
2083 connectable_update);
2084 u8 status;
2085
2086 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2087 mgmt_set_connectable_complete(hdev, status);
2088}
2089
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002090static u8 get_service_classes(struct hci_dev *hdev)
2091{
2092 struct bt_uuid *uuid;
2093 u8 val = 0;
2094
2095 list_for_each_entry(uuid, &hdev->uuids, list)
2096 val |= uuid->svc_hint;
2097
2098 return val;
2099}
2100
2101void __hci_req_update_class(struct hci_request *req)
2102{
2103 struct hci_dev *hdev = req->hdev;
2104 u8 cod[3];
2105
2106 BT_DBG("%s", hdev->name);
2107
2108 if (!hdev_is_powered(hdev))
2109 return;
2110
2111 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2112 return;
2113
2114 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2115 return;
2116
2117 cod[0] = hdev->minor_class;
2118 cod[1] = hdev->major_class;
2119 cod[2] = get_service_classes(hdev);
2120
2121 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2122 cod[1] |= 0x20;
2123
2124 if (memcmp(cod, hdev->dev_class, 3) == 0)
2125 return;
2126
2127 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2128}
2129
Johan Hedbergaed1a882015-11-22 17:24:44 +03002130static void write_iac(struct hci_request *req)
2131{
2132 struct hci_dev *hdev = req->hdev;
2133 struct hci_cp_write_current_iac_lap cp;
2134
2135 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2136 return;
2137
2138 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2139 /* Limited discoverable mode */
2140 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2141 cp.iac_lap[0] = 0x00; /* LIAC */
2142 cp.iac_lap[1] = 0x8b;
2143 cp.iac_lap[2] = 0x9e;
2144 cp.iac_lap[3] = 0x33; /* GIAC */
2145 cp.iac_lap[4] = 0x8b;
2146 cp.iac_lap[5] = 0x9e;
2147 } else {
2148 /* General discoverable mode */
2149 cp.num_iac = 1;
2150 cp.iac_lap[0] = 0x33; /* GIAC */
2151 cp.iac_lap[1] = 0x8b;
2152 cp.iac_lap[2] = 0x9e;
2153 }
2154
2155 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2156 (cp.num_iac * 3) + 1, &cp);
2157}
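
/* The byte triplets above are little-endian encodings of the two
 * reserved inquiry access codes: LIAC 0x9e8b00 (limited discoverable)
 * and GIAC 0x9e8b33 (general discoverable).
 */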
2158
2159static int discoverable_update(struct hci_request *req, unsigned long opt)
2160{
2161 struct hci_dev *hdev = req->hdev;
2162
2163 hci_dev_lock(hdev);
2164
2165 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2166 write_iac(req);
2167 __hci_req_update_scan(req);
2168 __hci_req_update_class(req);
2169 }
2170
2171 /* Advertising instances don't use the global discoverable setting, so
2172 * only update AD if advertising was enabled using Set Advertising.
2173 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002174 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002175 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002176
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002177 /* Discoverable mode affects the local advertising
2178 * address in limited privacy mode.
2179 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302180 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2181 if (ext_adv_capable(hdev))
2182 __hci_req_start_ext_adv(req, 0x00);
2183 else
2184 __hci_req_enable_advertising(req);
2185 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002186 }
2187
Johan Hedbergaed1a882015-11-22 17:24:44 +03002188 hci_dev_unlock(hdev);
2189
2190 return 0;
2191}
2192
2193static void discoverable_update_work(struct work_struct *work)
2194{
2195 struct hci_dev *hdev = container_of(work, struct hci_dev,
2196 discoverable_update);
2197 u8 status;
2198
2199 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2200 mgmt_set_discoverable_complete(hdev, status);
2201}
2202
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002203void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2204 u8 reason)
2205{
2206 switch (conn->state) {
2207 case BT_CONNECTED:
2208 case BT_CONFIG:
2209 if (conn->type == AMP_LINK) {
2210 struct hci_cp_disconn_phy_link cp;
2211
2212 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2213 cp.reason = reason;
2214 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2215 &cp);
2216 } else {
2217 struct hci_cp_disconnect dc;
2218
2219 dc.handle = cpu_to_le16(conn->handle);
2220 dc.reason = reason;
2221 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2222 }
2223
2224 conn->state = BT_DISCONN;
2225
2226 break;
2227 case BT_CONNECT:
2228 if (conn->type == LE_LINK) {
2229 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2230 break;
2231 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2232 0, NULL);
2233 } else if (conn->type == ACL_LINK) {
2234 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2235 break;
2236 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2237 6, &conn->dst);
2238 }
2239 break;
2240 case BT_CONNECT2:
2241 if (conn->type == ACL_LINK) {
2242 struct hci_cp_reject_conn_req rej;
2243
2244 bacpy(&rej.bdaddr, &conn->dst);
2245 rej.reason = reason;
2246
2247 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2248 sizeof(rej), &rej);
2249 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2250 struct hci_cp_reject_sync_conn_req rej;
2251
2252 bacpy(&rej.bdaddr, &conn->dst);
2253
2254 /* SCO rejection has its own limited set of
2255 * allowed error values (0x0D-0x0F) which isn't
2256 * compatible with most values passed to this
2257			 * function. To be safe, hard-code one of the
2258 * values that's suitable for SCO.
2259 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002260 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002261
2262 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2263 sizeof(rej), &rej);
2264 }
2265 break;
2266 default:
2267 conn->state = BT_CLOSED;
2268 break;
2269 }
2270}
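
/* Summary of the abort strategy above: established links
 * (BT_CONNECTED/BT_CONFIG) get a Disconnect (or Disconnect Physical
 * Link for AMP), outgoing attempts (BT_CONNECT) get a connection
 * cancel, incoming attempts (BT_CONNECT2) get a (sync) connection
 * reject, and any other state is simply marked BT_CLOSED.
 */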
2271
2272static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2273{
2274 if (status)
2275 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2276}
2277
2278int hci_abort_conn(struct hci_conn *conn, u8 reason)
2279{
2280 struct hci_request req;
2281 int err;
2282
2283 hci_req_init(&req, conn->hdev);
2284
2285 __hci_abort_conn(&req, conn, reason);
2286
2287 err = hci_req_run(&req, abort_conn_complete);
2288 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002289 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002290 return err;
2291 }
2292
2293 return 0;
2294}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002295
Johan Hedberga1d01db2015-11-11 08:11:25 +02002296static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002297{
2298 hci_dev_lock(req->hdev);
2299 __hci_update_background_scan(req);
2300 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002301 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002302}
2303
2304static void bg_scan_update(struct work_struct *work)
2305{
2306 struct hci_dev *hdev = container_of(work, struct hci_dev,
2307 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002308 struct hci_conn *conn;
2309 u8 status;
2310 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002311
Johan Hedberg84235d22015-11-11 08:11:20 +02002312 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2313 if (!err)
2314 return;
2315
2316 hci_dev_lock(hdev);
2317
2318 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2319 if (conn)
2320 hci_le_conn_failed(conn, status);
2321
2322 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002323}
2324
Johan Hedberga1d01db2015-11-11 08:11:25 +02002325static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002326{
2327 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002328 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002329}
2330
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002331static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2332{
2333 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002334 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2335 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002336 struct hci_cp_inquiry cp;
2337
2338 BT_DBG("%s", req->hdev->name);
2339
2340 hci_dev_lock(req->hdev);
2341 hci_inquiry_cache_flush(req->hdev);
2342 hci_dev_unlock(req->hdev);
2343
2344 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002345
2346 if (req->hdev->discovery.limited)
2347 memcpy(&cp.lap, liac, sizeof(cp.lap));
2348 else
2349 memcpy(&cp.lap, giac, sizeof(cp.lap));
2350
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002351 cp.length = length;
2352
2353 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2354
2355 return 0;
2356}
2357
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002358static void le_scan_disable_work(struct work_struct *work)
2359{
2360 struct hci_dev *hdev = container_of(work, struct hci_dev,
2361 le_scan_disable.work);
2362 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002363
2364 BT_DBG("%s", hdev->name);
2365
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002366 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002367 return;
2368
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002369 cancel_delayed_work(&hdev->le_scan_restart);
2370
2371 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2372 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002373 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2374 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002375 return;
2376 }
2377
2378 hdev->discovery.scan_start = 0;
2379
2380	/* If we were running an LE-only scan, change discovery state. If
2381	 * we were running both LE and BR/EDR inquiry simultaneously,
2382	 * and BR/EDR inquiry is already finished, stop discovery,
2383	 * otherwise BR/EDR inquiry will stop discovery when finished.
2384	 * If we are about to resolve a remote device name, do not change
2385	 * discovery state.
2386 */
2387
2388 if (hdev->discovery.type == DISCOV_TYPE_LE)
2389 goto discov_stopped;
2390
2391 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2392 return;
2393
2394 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2395 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2396 hdev->discovery.state != DISCOVERY_RESOLVING)
2397 goto discov_stopped;
2398
2399 return;
2400 }
2401
2402 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2403 HCI_CMD_TIMEOUT, &status);
2404 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002405 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002406 goto discov_stopped;
2407 }
2408
2409 return;
2410
2411discov_stopped:
2412 hci_dev_lock(hdev);
2413 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2414 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002415}
2416
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002417static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002418{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002419 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002420
2421 /* If controller is not scanning we are done. */
2422 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2423 return 0;
2424
2425 hci_req_add_le_scan_disable(req);
2426
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302427 if (use_ext_scan(hdev)) {
2428 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2429
2430 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2431 ext_enable_cp.enable = LE_SCAN_ENABLE;
2432 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2433
2434 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2435 sizeof(ext_enable_cp), &ext_enable_cp);
2436 } else {
2437 struct hci_cp_le_set_scan_enable cp;
2438
2439 memset(&cp, 0, sizeof(cp));
2440 cp.enable = LE_SCAN_ENABLE;
2441 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2442 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2443 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002444
2445 return 0;
2446}
2447
2448static void le_scan_restart_work(struct work_struct *work)
2449{
2450 struct hci_dev *hdev = container_of(work, struct hci_dev,
2451 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002452 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002453 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002454
2455 BT_DBG("%s", hdev->name);
2456
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002457 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002458 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002459 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2460 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002461 return;
2462 }
2463
2464 hci_dev_lock(hdev);
2465
2466 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2467 !hdev->discovery.scan_start)
2468 goto unlock;
2469
2470	/* When the scan was started, hdev->le_scan_disable was queued to
2471	 * run at scan_start + duration. That work was canceled during the
2472	 * scan restart, so queue it again with the proper timeout to make
2473	 * sure the scan does not run indefinitely.
2474 */
2475 duration = hdev->discovery.scan_duration;
2476 scan_start = hdev->discovery.scan_start;
2477 now = jiffies;
2478 if (now - scan_start <= duration) {
2479 int elapsed;
2480
2481 if (now >= scan_start)
2482 elapsed = now - scan_start;
2483 else
2484 elapsed = ULONG_MAX - scan_start + now;
2485
2486 timeout = duration - elapsed;
2487 } else {
2488 timeout = 0;
2489 }
2490
2491 queue_delayed_work(hdev->req_workqueue,
2492 &hdev->le_scan_disable, timeout);
2493
2494unlock:
2495 hci_dev_unlock(hdev);
2496}
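
/* Worked example of the rescheduling math above: with a scan duration
 * of 1024 jiffies and 400 jiffies elapsed since scan_start, the
 * disable work is requeued to run in 624 jiffies. The ULONG_MAX
 * branch keeps "elapsed" correct when jiffies wrapped around between
 * scan_start and now.
 */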
2497
Johan Hedberge68f0722015-11-11 08:30:30 +02002498static int active_scan(struct hci_request *req, unsigned long opt)
2499{
2500 uint16_t interval = opt;
2501 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002502 u8 own_addr_type;
2503 int err;
2504
2505 BT_DBG("%s", hdev->name);
2506
2507 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2508 hci_dev_lock(hdev);
2509
2510 /* Don't let discovery abort an outgoing connection attempt
2511 * that's using directed advertising.
2512 */
2513 if (hci_lookup_le_connect(hdev)) {
2514 hci_dev_unlock(hdev);
2515 return -EBUSY;
2516 }
2517
2518 cancel_adv_timeout(hdev);
2519 hci_dev_unlock(hdev);
2520
Jaganath Kanakkassery94386b62017-12-11 20:26:47 +05302521 __hci_req_disable_advertising(req);
Johan Hedberge68f0722015-11-11 08:30:30 +02002522 }
2523
2524	/* If the controller is scanning, it means background scanning is
2525 * running. Thus, we should temporarily stop it in order to set the
2526 * discovery scanning parameters.
2527 */
2528 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2529 hci_req_add_le_scan_disable(req);
2530
2531 /* All active scans will be done with either a resolvable private
2532 * address (when privacy feature has been enabled) or non-resolvable
2533 * private address.
2534 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002535 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2536 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002537 if (err < 0)
2538 own_addr_type = ADDR_LE_DEV_PUBLIC;
2539
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +05302540 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2541 own_addr_type, 0);
Johan Hedberge68f0722015-11-11 08:30:30 +02002542 return 0;
2543}
2544
2545static int interleaved_discov(struct hci_request *req, unsigned long opt)
2546{
2547 int err;
2548
2549 BT_DBG("%s", req->hdev->name);
2550
2551 err = active_scan(req, opt);
2552 if (err)
2553 return err;
2554
Johan Hedberg7df26b52015-11-11 12:24:21 +02002555 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002556}
2557
2558static void start_discovery(struct hci_dev *hdev, u8 *status)
2559{
2560 unsigned long timeout;
2561
2562 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2563
2564 switch (hdev->discovery.type) {
2565 case DISCOV_TYPE_BREDR:
2566 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002567 hci_req_sync(hdev, bredr_inquiry,
2568 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002569 status);
2570 return;
2571 case DISCOV_TYPE_INTERLEAVED:
2572 /* When running simultaneous discovery, the LE scanning time
2573		 * should occupy the whole discovery time since BR/EDR inquiry
2574 * and LE scanning are scheduled by the controller.
2575 *
2576		 * For interleaved discovery, in comparison, BR/EDR inquiry
2577 * and LE scanning are done sequentially with separate
2578 * timeouts.
2579 */
2580 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2581 &hdev->quirks)) {
2582 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2583 /* During simultaneous discovery, we double LE scan
2584 * interval. We must leave some time for the controller
2585 * to do BR/EDR inquiry.
2586 */
2587 hci_req_sync(hdev, interleaved_discov,
2588 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2589 status);
2590 break;
2591 }
2592
2593 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2594 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2595 HCI_CMD_TIMEOUT, status);
2596 break;
2597 case DISCOV_TYPE_LE:
2598 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2599 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2600 HCI_CMD_TIMEOUT, status);
2601 break;
2602 default:
2603 *status = HCI_ERROR_UNSPECIFIED;
2604 return;
2605 }
2606
2607 if (*status)
2608 return;
2609
2610 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2611
2612 /* When service discovery is used and the controller has a
2613 * strict duplicate filter, it is important to remember the
2614 * start and duration of the scan. This is required for
2615 * restarting scanning during the discovery phase.
2616 */
2617 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2618 hdev->discovery.result_filtering) {
2619 hdev->discovery.scan_start = jiffies;
2620 hdev->discovery.scan_duration = timeout;
2621 }
2622
2623 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2624 timeout);
2625}
2626
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002627bool hci_req_stop_discovery(struct hci_request *req)
2628{
2629 struct hci_dev *hdev = req->hdev;
2630 struct discovery_state *d = &hdev->discovery;
2631 struct hci_cp_remote_name_req_cancel cp;
2632 struct inquiry_entry *e;
2633 bool ret = false;
2634
2635 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2636
2637 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2638 if (test_bit(HCI_INQUIRY, &hdev->flags))
2639 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2640
2641 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2642 cancel_delayed_work(&hdev->le_scan_disable);
2643 hci_req_add_le_scan_disable(req);
2644 }
2645
2646 ret = true;
2647 } else {
2648 /* Passive scanning */
2649 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2650 hci_req_add_le_scan_disable(req);
2651 ret = true;
2652 }
2653 }
2654
2655 /* No further actions needed for LE-only discovery */
2656 if (d->type == DISCOV_TYPE_LE)
2657 return ret;
2658
2659 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2660 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2661 NAME_PENDING);
2662 if (!e)
2663 return ret;
2664
2665 bacpy(&cp.bdaddr, &e->data.bdaddr);
2666 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2667 &cp);
2668 ret = true;
2669 }
2670
2671 return ret;
2672}
2673
2674static int stop_discovery(struct hci_request *req, unsigned long opt)
2675{
2676 hci_dev_lock(req->hdev);
2677 hci_req_stop_discovery(req);
2678 hci_dev_unlock(req->hdev);
2679
2680 return 0;
2681}
2682
Johan Hedberge68f0722015-11-11 08:30:30 +02002683static void discov_update(struct work_struct *work)
2684{
2685 struct hci_dev *hdev = container_of(work, struct hci_dev,
2686 discov_update);
2687 u8 status = 0;
2688
2689 switch (hdev->discovery.state) {
2690 case DISCOVERY_STARTING:
2691 start_discovery(hdev, &status);
2692 mgmt_start_discovery_complete(hdev, status);
2693 if (status)
2694 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2695 else
2696 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2697 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002698 case DISCOVERY_STOPPING:
2699 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2700 mgmt_stop_discovery_complete(hdev, status);
2701 if (!status)
2702 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2703 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002704 case DISCOVERY_STOPPED:
2705 default:
2706 return;
2707 }
2708}
2709
Johan Hedbergc366f552015-11-23 15:43:06 +02002710static void discov_off(struct work_struct *work)
2711{
2712 struct hci_dev *hdev = container_of(work, struct hci_dev,
2713 discov_off.work);
2714
2715 BT_DBG("%s", hdev->name);
2716
2717 hci_dev_lock(hdev);
2718
2719 /* When discoverable timeout triggers, then just make sure
2720 * the limited discoverable flag is cleared. Even in the case
2721 * of a timeout triggered from general discoverable, it is
2722 * safe to unconditionally clear the flag.
2723 */
2724 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2725 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2726 hdev->discov_timeout = 0;
2727
2728 hci_dev_unlock(hdev);
2729
2730 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2731 mgmt_new_settings(hdev);
2732}
2733
Johan Hedberg2ff13892015-11-25 16:15:44 +02002734static int powered_update_hci(struct hci_request *req, unsigned long opt)
2735{
2736 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002737 u8 link_sec;
2738
2739 hci_dev_lock(hdev);
2740
2741 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2742 !lmp_host_ssp_capable(hdev)) {
2743 u8 mode = 0x01;
2744
2745 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2746
2747 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2748 u8 support = 0x01;
2749
2750 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2751 sizeof(support), &support);
2752 }
2753 }
2754
2755 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2756 lmp_bredr_capable(hdev)) {
2757 struct hci_cp_write_le_host_supported cp;
2758
2759 cp.le = 0x01;
2760 cp.simul = 0x00;
2761
2762 /* Check first if we already have the right
2763 * host state (host features set)
2764 */
2765 if (cp.le != lmp_host_le_capable(hdev) ||
2766 cp.simul != lmp_host_le_br_capable(hdev))
2767 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2768 sizeof(cp), &cp);
2769 }
2770
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002771 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002772 /* Make sure the controller has a good default for
2773 * advertising data. This also applies to the case
2774 * where BR/EDR was toggled during the AUTO_OFF phase.
2775 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002776 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2777 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302778 int err;
2779
2780 if (ext_adv_capable(hdev)) {
2781 err = __hci_req_setup_ext_adv_instance(req,
2782 0x00);
2783 if (!err)
2784 __hci_req_update_scan_rsp_data(req,
2785 0x00);
2786 } else {
2787 err = 0;
2788 __hci_req_update_adv_data(req, 0x00);
2789 __hci_req_update_scan_rsp_data(req, 0x00);
2790 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002791
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302792 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302793 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302794 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302795 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002796 __hci_req_enable_ext_advertising(req,
2797 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302798 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002799 } else if (!list_empty(&hdev->adv_instances)) {
2800 struct adv_info *adv_instance;
2801
Johan Hedberg2ff13892015-11-25 16:15:44 +02002802 adv_instance = list_first_entry(&hdev->adv_instances,
2803 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002804 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002805 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002806 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002807 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002808 }
2809
2810 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2811 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2812 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2813 sizeof(link_sec), &link_sec);
2814
2815 if (lmp_bredr_capable(hdev)) {
2816 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2817 __hci_req_write_fast_connectable(req, true);
2818 else
2819 __hci_req_write_fast_connectable(req, false);
2820 __hci_req_update_scan(req);
2821 __hci_req_update_class(req);
2822 __hci_req_update_name(req);
2823 __hci_req_update_eir(req);
2824 }
2825
2826 hci_dev_unlock(hdev);
2827 return 0;
2828}
2829
2830int __hci_req_hci_power_on(struct hci_dev *hdev)
2831{
2832 /* Register the available SMP channels (BR/EDR and LE) only when
2833 * successfully powering on the controller. This late
2834 * registration is required so that LE SMP can clearly decide if
2835 * the public address or static address is used.
2836 */
2837 smp_register(hdev);
2838
2839 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2840 NULL);
2841}
2842
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002843void hci_request_setup(struct hci_dev *hdev)
2844{
Johan Hedberge68f0722015-11-11 08:30:30 +02002845 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002846 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002847 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002848 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002849 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002850 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002851 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2852 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002853 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002854}
2855
2856void hci_request_cancel_all(struct hci_dev *hdev)
2857{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002858 hci_req_sync_cancel(hdev, ENODEV);
2859
Johan Hedberge68f0722015-11-11 08:30:30 +02002860 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002861 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002862 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002863 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002864 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002865 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002866 cancel_delayed_work_sync(&hdev->le_scan_disable);
2867 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002868
2869 if (hdev->adv_instance_timeout) {
2870 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2871 hdev->adv_instance_timeout = 0;
2872 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002873}