/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

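/* Helpers for building and running batched HCI command requests.
 *
 * A typical caller-side flow (a descriptive sketch of the API below,
 * with HCI_OP_SOME_CMD standing in for a real opcode) looks like:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_SOME_CMD, sizeof(cp), &cp);
 *	err = hci_req_run(&req, complete_cb);
 *
 * Commands are collected on req->cmd_q and only spliced onto the
 * device command queue once the request is run.
 */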
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

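/* Finalize a request and splice its command queue onto the device's
 * command queue. At most one of @complete or @complete_skb is attached
 * to the last command so the event handler can signal completion.
 */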
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

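/* Send a single HCI command and block until the matching event (or the
 * command complete/status event when @event is 0) arrives, or until
 * @timeout expires. Returns the event skb on success or an ERR_PTR on
 * failure.
 */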
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

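/* Allocate an skb carrying one HCI command packet: a command header
 * (opcode plus parameter length) followed by @plen bytes of
 * parameters. Returns NULL on allocation failure.
 */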
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

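/* Adjust page scan parameters for "fast connectable" mode: interlaced
 * page scan with a 160 ms interval when enabled, the standard 1.28 s
 * scan otherwise. Commands are only queued when the values actually
 * change.
 */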
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure proper values are set for the RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

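/* Synchronize the controller white list with the pending LE connection
 * and report lists. Returns the scan filter policy to use: 0x01 when
 * the white list can be used, or 0x00 (accept all advertising) when it
 * cannot, e.g. because an entry uses an RPA or the list would overflow.
 */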
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

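/* Queue the commands to start LE scanning with the given parameters,
 * using the extended scan commands (with per-PHY parameters) when the
 * controller supports them and the legacy scan commands otherwise.
 */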
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the extended scan parameter and scan
	 * enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

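/* Return the scan response length for an advertising instance. The
 * default instance (0) is treated as having no instance scan response
 * data.
 */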
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

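/* Queue the legacy advertising setup: pick ADV_IND, ADV_SCAN_IND or
 * ADV_NONCONN_IND from the instance flags and scan response data,
 * update the random address if needed, then set the advertising
 * parameters and enable advertising.
 */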
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

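/* Append the device name to an advertising data buffer, preferring the
 * complete name, then the configured short name, and finally a
 * truncated copy of the complete name. Returns the new data length.
 */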
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

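/* Build the advertising data payload for an instance: the "Flags" AD
 * field (when managed), the instance's own data and, if requested and
 * valid, a Tx Power field. Returns the resulting data length.
 */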
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

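/* Pick the own-address type and, if needed, a random address for
 * advertising: an RPA when privacy is in use, a freshly generated
 * non-resolvable private address when privacy is required without
 * RPAs, and the public address otherwise.
 */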
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

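/* Queue the LE Set Extended Advertising Parameters command for an
 * instance, choosing legacy or extended PDU event properties from the
 * instance flags, and set the per-set random address when one is
 * required and has changed.
 */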
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301534int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301535{
1536 struct hci_cp_le_set_ext_adv_params cp;
1537 struct hci_dev *hdev = req->hdev;
1538 bool connectable;
1539 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301540 bdaddr_t random_addr;
1541 u8 own_addr_type;
1542 int err;
1543 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301544 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301545 /* In ext adv set param interval is 3 octets */
1546 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1547
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301548 if (instance > 0) {
1549 adv_instance = hci_find_adv_instance(hdev, instance);
1550 if (!adv_instance)
1551 return -EINVAL;
1552 } else {
1553 adv_instance = NULL;
1554 }
1555
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301556 flags = get_adv_instance_flags(hdev, instance);
1557
1558 /* If the "connectable" instance flag was not set, then choose between
1559 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1560 */
1561 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1562 mgmt_get_connectable(hdev);
1563
Colin Ian King75edd1f2018-11-09 13:27:36 +00001564 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301565 return -EPERM;
1566
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301567 /* Set require_privacy to true only when non-connectable
1568 * advertising is used. In that case it is fine to use a
1569 * non-resolvable private address.
1570 */
1571 err = hci_get_random_address(hdev, !connectable,
1572 adv_use_rpa(hdev, flags), adv_instance,
1573 &own_addr_type, &random_addr);
1574 if (err < 0)
1575 return err;
1576
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301577 memset(&cp, 0, sizeof(cp));
1578
1579 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1580 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1581
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301582 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1583
1584 if (connectable) {
1585 if (secondary_adv)
1586 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1587 else
1588 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1589 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1590 if (secondary_adv)
1591 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1592 else
1593 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1594 } else {
1595 if (secondary_adv)
1596 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1597 else
1598 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1599 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301600
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301601 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301602 cp.channel_map = hdev->le_adv_channel_map;
1603 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001604 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301605
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301606 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1607 cp.primary_phy = HCI_ADV_PHY_1M;
1608 cp.secondary_phy = HCI_ADV_PHY_2M;
1609 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1610 cp.primary_phy = HCI_ADV_PHY_CODED;
1611 cp.secondary_phy = HCI_ADV_PHY_CODED;
1612 } else {
1613 /* In all other cases use 1M */
1614 cp.primary_phy = HCI_ADV_PHY_1M;
1615 cp.secondary_phy = HCI_ADV_PHY_1M;
1616 }
1617
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301618 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1619
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301620 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1621 bacmp(&random_addr, BDADDR_ANY)) {
1622 struct hci_cp_le_set_adv_set_rand_addr cp;
1623
 1624	/* Check if the random address needs to be updated */
1625 if (adv_instance) {
1626 if (!bacmp(&random_addr, &adv_instance->random_addr))
1627 return 0;
1628 } else {
1629 if (!bacmp(&random_addr, &hdev->random_addr))
1630 return 0;
1631 }
1632
1633 memset(&cp, 0, sizeof(cp));
1634
1635 cp.handle = 0;
1636 bacpy(&cp.bdaddr, &random_addr);
1637
1638 hci_req_add(req,
1639 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1640 sizeof(cp), &cp);
1641 }
1642
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301643 return 0;
1644}
1645
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001646int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301647{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001648 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301649 struct hci_cp_le_set_ext_adv_enable *cp;
1650 struct hci_cp_ext_adv_set *adv_set;
1651 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001652 struct adv_info *adv_instance;
1653
1654 if (instance > 0) {
1655 adv_instance = hci_find_adv_instance(hdev, instance);
1656 if (!adv_instance)
1657 return -EINVAL;
1658 } else {
1659 adv_instance = NULL;
1660 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301661
1662 cp = (void *) data;
1663 adv_set = (void *) cp->data;
1664
1665 memset(cp, 0, sizeof(*cp));
1666
1667 cp->enable = 0x01;
1668 cp->num_of_sets = 0x01;
1669
1670 memset(adv_set, 0, sizeof(*adv_set));
1671
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001672 adv_set->handle = instance;
1673
 1674	/* Set the duration per instance since the controller is
 1675	 * responsible for scheduling it.
1676 */
1677 if (adv_instance && adv_instance->duration) {
1678 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1679
1680 /* Time = N * 10 ms */
1681 adv_set->duration = cpu_to_le16(duration / 10);
1682 }
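	/* For example, an instance duration of 5 seconds gives
	 * 5000 ms / 10 ms = 500 (0x01f4) as the duration value.
	 */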
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301683
1684 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1685 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1686 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001687
1688 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301689}
1690
1691int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1692{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301693 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301694 int err;
1695
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301696 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1697 __hci_req_disable_advertising(req);
1698
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301699 err = __hci_req_setup_ext_adv_instance(req, instance);
1700 if (err < 0)
1701 return err;
1702
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301703 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001704 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301705
1706 return 0;
1707}
1708
Johan Hedbergf2252572015-11-18 12:49:20 +02001709int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1710 bool force)
1711{
1712 struct hci_dev *hdev = req->hdev;
1713 struct adv_info *adv_instance = NULL;
1714 u16 timeout;
1715
1716 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001717 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001718 return -EPERM;
1719
1720 if (hdev->adv_instance_timeout)
1721 return -EBUSY;
1722
1723 adv_instance = hci_find_adv_instance(hdev, instance);
1724 if (!adv_instance)
1725 return -ENOENT;
1726
1727 /* A zero timeout means unlimited advertising. As long as there is
 1728	 * only one instance, the duration should be ignored. We still set a timeout
 1729	 * in case further instances are added later on.
1730 *
1731 * If the remaining lifetime of the instance is more than the duration
1732 * then the timeout corresponds to the duration, otherwise it will be
1733 * reduced to the remaining instance lifetime.
1734 */
1735 if (adv_instance->timeout == 0 ||
1736 adv_instance->duration <= adv_instance->remaining_time)
1737 timeout = adv_instance->duration;
1738 else
1739 timeout = adv_instance->remaining_time;
1740
 1741	/* The remaining time is reduced unless the instance is being
 1742	 * advertised without a time limit.
1743 */
1744 if (adv_instance->timeout)
1745 adv_instance->remaining_time =
1746 adv_instance->remaining_time - timeout;
1747
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001748 /* Only use work for scheduling instances with legacy advertising */
1749 if (!ext_adv_capable(hdev)) {
1750 hdev->adv_instance_timeout = timeout;
1751 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02001752 &hdev->adv_instance_expire,
1753 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001754 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001755
1756 /* If we're just re-scheduling the same instance again then do not
1757 * execute any HCI commands. This happens when a single instance is
1758 * being advertised.
1759 */
1760 if (!force && hdev->cur_adv_instance == instance &&
1761 hci_dev_test_flag(hdev, HCI_LE_ADV))
1762 return 0;
1763
1764 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301765 if (ext_adv_capable(hdev)) {
1766 __hci_req_start_ext_adv(req, instance);
1767 } else {
1768 __hci_req_update_adv_data(req, instance);
1769 __hci_req_update_scan_rsp_data(req, instance);
1770 __hci_req_enable_advertising(req);
1771 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001772
1773 return 0;
1774}
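/* A minimal caller sketch (hypothetical; the real callers in mgmt.c set up
 * more state around this): schedule instance 0x01 and fire the request.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_dev_lock(hdev);
 *	__hci_req_schedule_adv_instance(&req, 0x01, true);
 *	hci_dev_unlock(hdev);
 *	hci_req_run(&req, NULL);
 */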
1775
1776static void cancel_adv_timeout(struct hci_dev *hdev)
1777{
1778 if (hdev->adv_instance_timeout) {
1779 hdev->adv_instance_timeout = 0;
1780 cancel_delayed_work(&hdev->adv_instance_expire);
1781 }
1782}
1783
1784/* For a single instance:
1785 * - force == true: The instance will be removed even when its remaining
1786 * lifetime is not zero.
 1787	 * - force == false: The instance will be deactivated but kept stored unless
1788 * the remaining lifetime is zero.
1789 *
1790 * For instance == 0x00:
1791 * - force == true: All instances will be removed regardless of their timeout
1792 * setting.
1793 * - force == false: Only instances that have a timeout will be removed.
1794 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001795void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1796 struct hci_request *req, u8 instance,
1797 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02001798{
1799 struct adv_info *adv_instance, *n, *next_instance = NULL;
1800 int err;
1801 u8 rem_inst;
1802
1803 /* Cancel any timeout concerning the removed instance(s). */
1804 if (!instance || hdev->cur_adv_instance == instance)
1805 cancel_adv_timeout(hdev);
1806
1807 /* Get the next instance to advertise BEFORE we remove
1808 * the current one. This can be the same instance again
1809 * if there is only one instance.
1810 */
1811 if (instance && hdev->cur_adv_instance == instance)
1812 next_instance = hci_get_next_instance(hdev, instance);
1813
1814 if (instance == 0x00) {
1815 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1816 list) {
1817 if (!(force || adv_instance->timeout))
1818 continue;
1819
1820 rem_inst = adv_instance->instance;
1821 err = hci_remove_adv_instance(hdev, rem_inst);
1822 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001823 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02001824 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001825 } else {
1826 adv_instance = hci_find_adv_instance(hdev, instance);
1827
1828 if (force || (adv_instance && adv_instance->timeout &&
1829 !adv_instance->remaining_time)) {
1830 /* Don't advertise a removed instance. */
1831 if (next_instance &&
1832 next_instance->instance == instance)
1833 next_instance = NULL;
1834
1835 err = hci_remove_adv_instance(hdev, instance);
1836 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001837 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001838 }
1839 }
1840
Johan Hedbergf2252572015-11-18 12:49:20 +02001841 if (!req || !hdev_is_powered(hdev) ||
1842 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1843 return;
1844
1845 if (next_instance)
1846 __hci_req_schedule_adv_instance(req, next_instance->instance,
1847 false);
1848}
1849
Johan Hedberg0857dd32014-12-19 13:40:20 +02001850static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1851{
1852 struct hci_dev *hdev = req->hdev;
1853
1854 /* If we're advertising or initiating an LE connection we can't
1855 * go ahead and change the random address at this time. This is
1856 * because the eventual initiator address used for the
1857 * subsequently created connection will be undefined (some
1858 * controllers use the new address and others the one we had
1859 * when the operation started).
1860 *
 1861	 * In this kind of scenario, skip the update and let the random
1862 * address be updated at the next cycle.
1863 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001864 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02001865 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001866 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001867 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001868 return;
1869 }
1870
1871 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1872}
1873
1874int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001875 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001876{
1877 struct hci_dev *hdev = req->hdev;
1878 int err;
1879
 1880	/* If privacy is enabled, use a resolvable private address. If the
 1881	 * current RPA has expired or something other than the current RPA
 1882	 * is in use, generate a new one.
1883 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001884 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001885 int to;
1886
1887 *own_addr_type = ADDR_LE_DEV_RANDOM;
1888
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001889 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001890 !bacmp(&hdev->random_addr, &hdev->rpa))
1891 return 0;
1892
1893 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1894 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001895 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001896 return err;
1897 }
1898
1899 set_random_addr(req, &hdev->rpa);
1900
1901 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1902 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1903
1904 return 0;
1905 }
1906
 1907	/* In case of required privacy without a resolvable private address,
 1908	 * use a non-resolvable private address. This is useful for active
1909 * scanning and non-connectable advertising.
1910 */
1911 if (require_privacy) {
1912 bdaddr_t nrpa;
1913
1914 while (true) {
1915 /* The non-resolvable private address is generated
 1916	 * from six random bytes with the two most significant
1917 * bits cleared.
1918 */
1919 get_random_bytes(&nrpa, 6);
1920 nrpa.b[5] &= 0x3f;
1921
1922 /* The non-resolvable private address shall not be
1923 * equal to the public address.
1924 */
1925 if (bacmp(&hdev->bdaddr, &nrpa))
1926 break;
1927 }
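		/* For example, if get_random_bytes() yielded the address
		 * c0:12:34:56:78:9a, the mask turns b[5] from 0xc0 into
		 * 0x00, i.e. 00:12:34:56:78:9a, so the two most significant
		 * bits end up cleared as required for an NRPA.
		 */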
1928
1929 *own_addr_type = ADDR_LE_DEV_RANDOM;
1930 set_random_addr(req, &nrpa);
1931 return 0;
1932 }
1933
 1934	/* If forcing the static address is in use or there is no public
 1935	 * address, use the static address as the random address (but skip
 1936	 * the HCI command if the current random address is already the
 1937	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001938 *
1939 * In case BR/EDR has been disabled on a dual-mode controller
1940 * and a static address has been configured, then use that
1941 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001942 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001943 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001944 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001945 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001946 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001947 *own_addr_type = ADDR_LE_DEV_RANDOM;
1948 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1949 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1950 &hdev->static_addr);
1951 return 0;
1952 }
1953
1954 /* Neither privacy nor static address is being used so use a
1955 * public address.
1956 */
1957 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1958
1959 return 0;
1960}
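/* Summarizing the ladder above: an RPA when privacy allows it, an NRPA when
 * privacy is required but an RPA cannot be used, the static address when one
 * is configured (or forced), and the public address otherwise.
 */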
Johan Hedberg2cf22212014-12-19 22:26:00 +02001961
Johan Hedberg405a2612014-12-19 23:18:22 +02001962static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1963{
1964 struct bdaddr_list *b;
1965
1966 list_for_each_entry(b, &hdev->whitelist, list) {
1967 struct hci_conn *conn;
1968
1969 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1970 if (!conn)
1971 return true;
1972
1973 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1974 return true;
1975 }
1976
1977 return false;
1978}
1979
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001980void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001981{
1982 struct hci_dev *hdev = req->hdev;
1983 u8 scan;
1984
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001985 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001986 return;
1987
1988 if (!hdev_is_powered(hdev))
1989 return;
1990
1991 if (mgmt_powering_down(hdev))
1992 return;
1993
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001994 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001995 disconnected_whitelist_entries(hdev))
1996 scan = SCAN_PAGE;
1997 else
1998 scan = SCAN_DISABLED;
1999
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002000 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002001 scan |= SCAN_INQUIRY;
2002
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002003 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2004 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2005 return;
2006
Johan Hedberg405a2612014-12-19 23:18:22 +02002007 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2008}
2009
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002010static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002011{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002012 hci_dev_lock(req->hdev);
2013 __hci_req_update_scan(req);
2014 hci_dev_unlock(req->hdev);
2015 return 0;
2016}
Johan Hedberg405a2612014-12-19 23:18:22 +02002017
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002018static void scan_update_work(struct work_struct *work)
2019{
2020 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2021
2022 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002023}
2024
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002025static int connectable_update(struct hci_request *req, unsigned long opt)
2026{
2027 struct hci_dev *hdev = req->hdev;
2028
2029 hci_dev_lock(hdev);
2030
2031 __hci_req_update_scan(req);
2032
2033 /* If BR/EDR is not enabled and we disable advertising as a
2034 * by-product of disabling connectable, we need to update the
2035 * advertising flags.
2036 */
2037 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002038 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002039
2040 /* Update the advertising parameters if necessary */
2041 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302042 !list_empty(&hdev->adv_instances)) {
2043 if (ext_adv_capable(hdev))
2044 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2045 else
2046 __hci_req_enable_advertising(req);
2047 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002048
2049 __hci_update_background_scan(req);
2050
2051 hci_dev_unlock(hdev);
2052
2053 return 0;
2054}
2055
2056static void connectable_update_work(struct work_struct *work)
2057{
2058 struct hci_dev *hdev = container_of(work, struct hci_dev,
2059 connectable_update);
2060 u8 status;
2061
2062 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2063 mgmt_set_connectable_complete(hdev, status);
2064}
2065
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002066static u8 get_service_classes(struct hci_dev *hdev)
2067{
2068 struct bt_uuid *uuid;
2069 u8 val = 0;
2070
2071 list_for_each_entry(uuid, &hdev->uuids, list)
2072 val |= uuid->svc_hint;
2073
2074 return val;
2075}
2076
2077void __hci_req_update_class(struct hci_request *req)
2078{
2079 struct hci_dev *hdev = req->hdev;
2080 u8 cod[3];
2081
2082 BT_DBG("%s", hdev->name);
2083
2084 if (!hdev_is_powered(hdev))
2085 return;
2086
2087 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2088 return;
2089
2090 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2091 return;
2092
2093 cod[0] = hdev->minor_class;
2094 cod[1] = hdev->major_class;
2095 cod[2] = get_service_classes(hdev);
2096
2097 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2098 cod[1] |= 0x20;
2099
2100 if (memcmp(cod, hdev->dev_class, 3) == 0)
2101 return;
2102
2103 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2104}
2105
Johan Hedbergaed1a882015-11-22 17:24:44 +03002106static void write_iac(struct hci_request *req)
2107{
2108 struct hci_dev *hdev = req->hdev;
2109 struct hci_cp_write_current_iac_lap cp;
2110
2111 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2112 return;
2113
2114 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2115 /* Limited discoverable mode */
2116 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2117 cp.iac_lap[0] = 0x00; /* LIAC */
2118 cp.iac_lap[1] = 0x8b;
2119 cp.iac_lap[2] = 0x9e;
2120 cp.iac_lap[3] = 0x33; /* GIAC */
2121 cp.iac_lap[4] = 0x8b;
2122 cp.iac_lap[5] = 0x9e;
2123 } else {
2124 /* General discoverable mode */
2125 cp.num_iac = 1;
2126 cp.iac_lap[0] = 0x33; /* GIAC */
2127 cp.iac_lap[1] = 0x8b;
2128 cp.iac_lap[2] = 0x9e;
2129 }
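	/* The LAP bytes are little endian: 0x33 0x8b 0x9e is the General
	 * Inquiry Access Code 0x9E8B33 and 0x00 0x8b 0x9e the Limited
	 * Inquiry Access Code 0x9E8B00.
	 */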
2130
2131 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2132 (cp.num_iac * 3) + 1, &cp);
2133}
2134
2135static int discoverable_update(struct hci_request *req, unsigned long opt)
2136{
2137 struct hci_dev *hdev = req->hdev;
2138
2139 hci_dev_lock(hdev);
2140
2141 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2142 write_iac(req);
2143 __hci_req_update_scan(req);
2144 __hci_req_update_class(req);
2145 }
2146
2147 /* Advertising instances don't use the global discoverable setting, so
2148 * only update AD if advertising was enabled using Set Advertising.
2149 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002150 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002151 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002152
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002153 /* Discoverable mode affects the local advertising
2154 * address in limited privacy mode.
2155 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302156 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2157 if (ext_adv_capable(hdev))
2158 __hci_req_start_ext_adv(req, 0x00);
2159 else
2160 __hci_req_enable_advertising(req);
2161 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002162 }
2163
Johan Hedbergaed1a882015-11-22 17:24:44 +03002164 hci_dev_unlock(hdev);
2165
2166 return 0;
2167}
2168
2169static void discoverable_update_work(struct work_struct *work)
2170{
2171 struct hci_dev *hdev = container_of(work, struct hci_dev,
2172 discoverable_update);
2173 u8 status;
2174
2175 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2176 mgmt_set_discoverable_complete(hdev, status);
2177}
2178
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002179void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2180 u8 reason)
2181{
2182 switch (conn->state) {
2183 case BT_CONNECTED:
2184 case BT_CONFIG:
2185 if (conn->type == AMP_LINK) {
2186 struct hci_cp_disconn_phy_link cp;
2187
2188 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2189 cp.reason = reason;
2190 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2191 &cp);
2192 } else {
2193 struct hci_cp_disconnect dc;
2194
2195 dc.handle = cpu_to_le16(conn->handle);
2196 dc.reason = reason;
2197 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2198 }
2199
2200 conn->state = BT_DISCONN;
2201
2202 break;
2203 case BT_CONNECT:
2204 if (conn->type == LE_LINK) {
2205 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2206 break;
2207 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2208 0, NULL);
2209 } else if (conn->type == ACL_LINK) {
2210 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2211 break;
2212 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2213 6, &conn->dst);
2214 }
2215 break;
2216 case BT_CONNECT2:
2217 if (conn->type == ACL_LINK) {
2218 struct hci_cp_reject_conn_req rej;
2219
2220 bacpy(&rej.bdaddr, &conn->dst);
2221 rej.reason = reason;
2222
2223 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2224 sizeof(rej), &rej);
2225 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2226 struct hci_cp_reject_sync_conn_req rej;
2227
2228 bacpy(&rej.bdaddr, &conn->dst);
2229
2230 /* SCO rejection has its own limited set of
2231 * allowed error values (0x0D-0x0F) which isn't
2232 * compatible with most values passed to this
 2233	 * function. To be safe, hard-code one of the
2234 * values that's suitable for SCO.
2235 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002236 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002237
2238 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2239 sizeof(rej), &rej);
2240 }
2241 break;
2242 default:
2243 conn->state = BT_CLOSED;
2244 break;
2245 }
2246}
2247
2248static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2249{
2250 if (status)
2251 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2252}
2253
2254int hci_abort_conn(struct hci_conn *conn, u8 reason)
2255{
2256 struct hci_request req;
2257 int err;
2258
2259 hci_req_init(&req, conn->hdev);
2260
2261 __hci_abort_conn(&req, conn, reason);
2262
2263 err = hci_req_run(&req, abort_conn_complete);
2264 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002265 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002266 return err;
2267 }
2268
2269 return 0;
2270}
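/* A minimal caller sketch (hypothetical; actual callers such as hci_conn.c
 * pass a reason derived from the disconnect cause):
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */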
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002271
Johan Hedberga1d01db2015-11-11 08:11:25 +02002272static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002273{
2274 hci_dev_lock(req->hdev);
2275 __hci_update_background_scan(req);
2276 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002277 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002278}
2279
2280static void bg_scan_update(struct work_struct *work)
2281{
2282 struct hci_dev *hdev = container_of(work, struct hci_dev,
2283 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002284 struct hci_conn *conn;
2285 u8 status;
2286 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002287
Johan Hedberg84235d22015-11-11 08:11:20 +02002288 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2289 if (!err)
2290 return;
2291
2292 hci_dev_lock(hdev);
2293
2294 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2295 if (conn)
2296 hci_le_conn_failed(conn, status);
2297
2298 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002299}
2300
Johan Hedberga1d01db2015-11-11 08:11:25 +02002301static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002302{
2303 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002304 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002305}
2306
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002307static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2308{
2309 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002310 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2311 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002312 struct hci_cp_inquiry cp;
2313
2314 BT_DBG("%s", req->hdev->name);
2315
2316 hci_dev_lock(req->hdev);
2317 hci_inquiry_cache_flush(req->hdev);
2318 hci_dev_unlock(req->hdev);
2319
2320 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002321
2322 if (req->hdev->discovery.limited)
2323 memcpy(&cp.lap, liac, sizeof(cp.lap));
2324 else
2325 memcpy(&cp.lap, giac, sizeof(cp.lap));
2326
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002327 cp.length = length;
2328
2329 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2330
2331 return 0;
2332}
2333
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002334static void le_scan_disable_work(struct work_struct *work)
2335{
2336 struct hci_dev *hdev = container_of(work, struct hci_dev,
2337 le_scan_disable.work);
2338 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002339
2340 BT_DBG("%s", hdev->name);
2341
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002342 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002343 return;
2344
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002345 cancel_delayed_work(&hdev->le_scan_restart);
2346
2347 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2348 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002349 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2350 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002351 return;
2352 }
2353
2354 hdev->discovery.scan_start = 0;
2355
 2356	/* If we were running an LE-only scan, change the discovery state.
 2357	 * If we were running both LE and BR/EDR inquiry simultaneously and
 2358	 * the BR/EDR inquiry has already finished, stop discovery; otherwise
 2359	 * the BR/EDR inquiry will stop discovery when it finishes. If we are
 2360	 * going to resolve a remote device name, do not change the discovery
 2361	 * state.
2362 */
2363
2364 if (hdev->discovery.type == DISCOV_TYPE_LE)
2365 goto discov_stopped;
2366
2367 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2368 return;
2369
2370 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2371 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2372 hdev->discovery.state != DISCOVERY_RESOLVING)
2373 goto discov_stopped;
2374
2375 return;
2376 }
2377
2378 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2379 HCI_CMD_TIMEOUT, &status);
2380 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002381 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002382 goto discov_stopped;
2383 }
2384
2385 return;
2386
2387discov_stopped:
2388 hci_dev_lock(hdev);
2389 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2390 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002391}
2392
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002393static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002394{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002395 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002396
2397 /* If controller is not scanning we are done. */
2398 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2399 return 0;
2400
2401 hci_req_add_le_scan_disable(req);
2402
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302403 if (use_ext_scan(hdev)) {
2404 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2405
2406 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2407 ext_enable_cp.enable = LE_SCAN_ENABLE;
2408 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2409
2410 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2411 sizeof(ext_enable_cp), &ext_enable_cp);
2412 } else {
2413 struct hci_cp_le_set_scan_enable cp;
2414
2415 memset(&cp, 0, sizeof(cp));
2416 cp.enable = LE_SCAN_ENABLE;
2417 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2418 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2419 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002420
2421 return 0;
2422}
2423
2424static void le_scan_restart_work(struct work_struct *work)
2425{
2426 struct hci_dev *hdev = container_of(work, struct hci_dev,
2427 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002428 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002429 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002430
2431 BT_DBG("%s", hdev->name);
2432
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002433 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002434 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002435 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2436 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002437 return;
2438 }
2439
2440 hci_dev_lock(hdev);
2441
2442 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2443 !hdev->discovery.scan_start)
2444 goto unlock;
2445
 2446	/* When the scan was started, hdev->le_scan_disable was queued to run
 2447	 * 'duration' after scan_start. During the scan restart that work has
 2448	 * been canceled, so we need to queue it again with the proper
 2449	 * remaining timeout to make sure the scan does not run indefinitely.
2450 */
2451 duration = hdev->discovery.scan_duration;
2452 scan_start = hdev->discovery.scan_start;
2453 now = jiffies;
2454 if (now - scan_start <= duration) {
2455 int elapsed;
2456
2457 if (now >= scan_start)
2458 elapsed = now - scan_start;
2459 else
2460 elapsed = ULONG_MAX - scan_start + now;
2461
2462 timeout = duration - elapsed;
2463 } else {
2464 timeout = 0;
2465 }
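	/* For example, with a 10 s scan duration of which 3 s have elapsed,
	 * the disable work is re-queued to fire after the remaining 7 s
	 * (all computed in jiffies).
	 */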
2466
2467 queue_delayed_work(hdev->req_workqueue,
2468 &hdev->le_scan_disable, timeout);
2469
2470unlock:
2471 hci_dev_unlock(hdev);
2472}
2473
Johan Hedberge68f0722015-11-11 08:30:30 +02002474static int active_scan(struct hci_request *req, unsigned long opt)
2475{
2476 uint16_t interval = opt;
2477 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002478 u8 own_addr_type;
2479 int err;
2480
2481 BT_DBG("%s", hdev->name);
2482
2483 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2484 hci_dev_lock(hdev);
2485
2486 /* Don't let discovery abort an outgoing connection attempt
2487 * that's using directed advertising.
2488 */
2489 if (hci_lookup_le_connect(hdev)) {
2490 hci_dev_unlock(hdev);
2491 return -EBUSY;
2492 }
2493
2494 cancel_adv_timeout(hdev);
2495 hci_dev_unlock(hdev);
2496
Jaganath Kanakkassery94386b62017-12-11 20:26:47 +05302497 __hci_req_disable_advertising(req);
Johan Hedberge68f0722015-11-11 08:30:30 +02002498 }
2499
 2500	/* If the controller is scanning, it means the background scanning is
2501 * running. Thus, we should temporarily stop it in order to set the
2502 * discovery scanning parameters.
2503 */
2504 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2505 hci_req_add_le_scan_disable(req);
2506
2507 /* All active scans will be done with either a resolvable private
2508 * address (when privacy feature has been enabled) or non-resolvable
2509 * private address.
2510 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002511 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2512 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002513 if (err < 0)
2514 own_addr_type = ADDR_LE_DEV_PUBLIC;
2515
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +05302516 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2517 own_addr_type, 0);
Johan Hedberge68f0722015-11-11 08:30:30 +02002518 return 0;
2519}
2520
2521static int interleaved_discov(struct hci_request *req, unsigned long opt)
2522{
2523 int err;
2524
2525 BT_DBG("%s", req->hdev->name);
2526
2527 err = active_scan(req, opt);
2528 if (err)
2529 return err;
2530
Johan Hedberg7df26b52015-11-11 12:24:21 +02002531 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002532}
2533
2534static void start_discovery(struct hci_dev *hdev, u8 *status)
2535{
2536 unsigned long timeout;
2537
2538 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2539
2540 switch (hdev->discovery.type) {
2541 case DISCOV_TYPE_BREDR:
2542 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002543 hci_req_sync(hdev, bredr_inquiry,
2544 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002545 status);
2546 return;
2547 case DISCOV_TYPE_INTERLEAVED:
2548 /* When running simultaneous discovery, the LE scanning time
 2549	 * should occupy the whole discovery time since BR/EDR inquiry
 2550	 * and LE scanning are scheduled by the controller.
 2551	 *
 2552	 * For interleaved discovery, in comparison, BR/EDR inquiry
2553 * and LE scanning are done sequentially with separate
2554 * timeouts.
2555 */
2556 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2557 &hdev->quirks)) {
2558 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
 2559	/* During simultaneous discovery, we double the LE scan
 2560	 * interval. We must leave some time for the controller
 2561	 * to do the BR/EDR inquiry.
2562 */
2563 hci_req_sync(hdev, interleaved_discov,
2564 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2565 status);
2566 break;
2567 }
2568
2569 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2570 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2571 HCI_CMD_TIMEOUT, status);
2572 break;
2573 case DISCOV_TYPE_LE:
2574 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2575 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2576 HCI_CMD_TIMEOUT, status);
2577 break;
2578 default:
2579 *status = HCI_ERROR_UNSPECIFIED;
2580 return;
2581 }
2582
2583 if (*status)
2584 return;
2585
2586 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2587
2588 /* When service discovery is used and the controller has a
2589 * strict duplicate filter, it is important to remember the
2590 * start and duration of the scan. This is required for
2591 * restarting scanning during the discovery phase.
2592 */
2593 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2594 hdev->discovery.result_filtering) {
2595 hdev->discovery.scan_start = jiffies;
2596 hdev->discovery.scan_duration = timeout;
2597 }
2598
2599 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2600 timeout);
2601}
2602
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002603bool hci_req_stop_discovery(struct hci_request *req)
2604{
2605 struct hci_dev *hdev = req->hdev;
2606 struct discovery_state *d = &hdev->discovery;
2607 struct hci_cp_remote_name_req_cancel cp;
2608 struct inquiry_entry *e;
2609 bool ret = false;
2610
2611 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2612
2613 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2614 if (test_bit(HCI_INQUIRY, &hdev->flags))
2615 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2616
2617 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2618 cancel_delayed_work(&hdev->le_scan_disable);
2619 hci_req_add_le_scan_disable(req);
2620 }
2621
2622 ret = true;
2623 } else {
2624 /* Passive scanning */
2625 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2626 hci_req_add_le_scan_disable(req);
2627 ret = true;
2628 }
2629 }
2630
2631 /* No further actions needed for LE-only discovery */
2632 if (d->type == DISCOV_TYPE_LE)
2633 return ret;
2634
2635 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2636 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2637 NAME_PENDING);
2638 if (!e)
2639 return ret;
2640
2641 bacpy(&cp.bdaddr, &e->data.bdaddr);
2642 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2643 &cp);
2644 ret = true;
2645 }
2646
2647 return ret;
2648}
2649
2650static int stop_discovery(struct hci_request *req, unsigned long opt)
2651{
2652 hci_dev_lock(req->hdev);
2653 hci_req_stop_discovery(req);
2654 hci_dev_unlock(req->hdev);
2655
2656 return 0;
2657}
2658
Johan Hedberge68f0722015-11-11 08:30:30 +02002659static void discov_update(struct work_struct *work)
2660{
2661 struct hci_dev *hdev = container_of(work, struct hci_dev,
2662 discov_update);
2663 u8 status = 0;
2664
2665 switch (hdev->discovery.state) {
2666 case DISCOVERY_STARTING:
2667 start_discovery(hdev, &status);
2668 mgmt_start_discovery_complete(hdev, status);
2669 if (status)
2670 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2671 else
2672 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2673 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002674 case DISCOVERY_STOPPING:
2675 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2676 mgmt_stop_discovery_complete(hdev, status);
2677 if (!status)
2678 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2679 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002680 case DISCOVERY_STOPPED:
2681 default:
2682 return;
2683 }
2684}
2685
Johan Hedbergc366f552015-11-23 15:43:06 +02002686static void discov_off(struct work_struct *work)
2687{
2688 struct hci_dev *hdev = container_of(work, struct hci_dev,
2689 discov_off.work);
2690
2691 BT_DBG("%s", hdev->name);
2692
2693 hci_dev_lock(hdev);
2694
 2695	/* When the discoverable timeout triggers, just make sure
2696 * the limited discoverable flag is cleared. Even in the case
2697 * of a timeout triggered from general discoverable, it is
2698 * safe to unconditionally clear the flag.
2699 */
2700 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2701 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2702 hdev->discov_timeout = 0;
2703
2704 hci_dev_unlock(hdev);
2705
2706 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2707 mgmt_new_settings(hdev);
2708}
2709
Johan Hedberg2ff13892015-11-25 16:15:44 +02002710static int powered_update_hci(struct hci_request *req, unsigned long opt)
2711{
2712 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002713 u8 link_sec;
2714
2715 hci_dev_lock(hdev);
2716
2717 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2718 !lmp_host_ssp_capable(hdev)) {
2719 u8 mode = 0x01;
2720
2721 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2722
2723 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2724 u8 support = 0x01;
2725
2726 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2727 sizeof(support), &support);
2728 }
2729 }
2730
2731 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2732 lmp_bredr_capable(hdev)) {
2733 struct hci_cp_write_le_host_supported cp;
2734
2735 cp.le = 0x01;
2736 cp.simul = 0x00;
2737
2738 /* Check first if we already have the right
2739 * host state (host features set)
2740 */
2741 if (cp.le != lmp_host_le_capable(hdev) ||
2742 cp.simul != lmp_host_le_br_capable(hdev))
2743 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2744 sizeof(cp), &cp);
2745 }
2746
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002747 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002748 /* Make sure the controller has a good default for
2749 * advertising data. This also applies to the case
2750 * where BR/EDR was toggled during the AUTO_OFF phase.
2751 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002752 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2753 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302754 int err;
2755
2756 if (ext_adv_capable(hdev)) {
2757 err = __hci_req_setup_ext_adv_instance(req,
2758 0x00);
2759 if (!err)
2760 __hci_req_update_scan_rsp_data(req,
2761 0x00);
2762 } else {
2763 err = 0;
2764 __hci_req_update_adv_data(req, 0x00);
2765 __hci_req_update_scan_rsp_data(req, 0x00);
2766 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002767
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302768 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302769 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302770 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302771 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002772 __hci_req_enable_ext_advertising(req,
2773 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302774 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002775 } else if (!list_empty(&hdev->adv_instances)) {
2776 struct adv_info *adv_instance;
2777
Johan Hedberg2ff13892015-11-25 16:15:44 +02002778 adv_instance = list_first_entry(&hdev->adv_instances,
2779 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002780 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002781 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002782 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002783 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002784 }
2785
2786 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2787 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2788 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2789 sizeof(link_sec), &link_sec);
2790
2791 if (lmp_bredr_capable(hdev)) {
2792 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2793 __hci_req_write_fast_connectable(req, true);
2794 else
2795 __hci_req_write_fast_connectable(req, false);
2796 __hci_req_update_scan(req);
2797 __hci_req_update_class(req);
2798 __hci_req_update_name(req);
2799 __hci_req_update_eir(req);
2800 }
2801
2802 hci_dev_unlock(hdev);
2803 return 0;
2804}
2805
2806int __hci_req_hci_power_on(struct hci_dev *hdev)
2807{
2808 /* Register the available SMP channels (BR/EDR and LE) only when
2809 * successfully powering on the controller. This late
2810 * registration is required so that LE SMP can clearly decide if
 2811	 * registration is required so that LE SMP can clearly decide
 2812	 * whether the public address or the static address is used.
2813 smp_register(hdev);
2814
2815 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2816 NULL);
2817}
2818
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002819void hci_request_setup(struct hci_dev *hdev)
2820{
Johan Hedberge68f0722015-11-11 08:30:30 +02002821 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002822 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002823 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002824 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002825 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002826 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002827 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2828 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002829 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002830}
2831
2832void hci_request_cancel_all(struct hci_dev *hdev)
2833{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002834 hci_req_sync_cancel(hdev, ENODEV);
2835
Johan Hedberge68f0722015-11-11 08:30:30 +02002836 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002837 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002838 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002839 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002840 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002841 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002842 cancel_delayed_work_sync(&hdev->le_scan_disable);
2843 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002844
2845 if (hdev->adv_instance_timeout) {
2846 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2847 hdev->adv_instance_timeout = 0;
2848 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002849}