Johan Hedberg0857dd32014-12-19 13:40:20 +02001/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
Ingo Molnar174cd4b2017-02-02 19:15:33 +010024#include <linux/sched/signal.h>
25
Johan Hedberg0857dd32014-12-19 13:40:20 +020026#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
Johan Hedbergf2252572015-11-18 12:49:20 +020028#include <net/bluetooth/mgmt.h>
Johan Hedberg0857dd32014-12-19 13:40:20 +020029
30#include "smp.h"
31#include "hci_request.h"
32
Johan Hedbergbe91cd02015-11-10 09:44:54 +020033#define HCI_REQ_DONE 0
34#define HCI_REQ_PEND 1
35#define HCI_REQ_CANCELED 2
36
Johan Hedberg0857dd32014-12-19 13:40:20 +020037void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
Jaganath Kanakkasseryf17d8582017-10-25 10:58:48 +053044void hci_req_purge(struct hci_request *req)
45{
46 skb_queue_purge(&req->cmd_q);
47}
48
Johan Hedberge62144872015-04-02 13:41:08 +030049static int req_run(struct hci_request *req, hci_req_complete_t complete,
50 hci_req_complete_skb_t complete_skb)
Johan Hedberg0857dd32014-12-19 13:40:20 +020051{
52 struct hci_dev *hdev = req->hdev;
53 struct sk_buff *skb;
54 unsigned long flags;
55
56 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
57
58 /* If an error occurred during request building, remove all HCI
59 * commands queued on the HCI request queue.
60 */
61 if (req->err) {
62 skb_queue_purge(&req->cmd_q);
63 return req->err;
64 }
65
66 /* Do not allow empty requests */
67 if (skb_queue_empty(&req->cmd_q))
68 return -ENODATA;
69
70 skb = skb_peek_tail(&req->cmd_q);
Johan Hedberg44d27132015-11-05 09:31:40 +020071 if (complete) {
72 bt_cb(skb)->hci.req_complete = complete;
73 } else if (complete_skb) {
74 bt_cb(skb)->hci.req_complete_skb = complete_skb;
75 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
76 }
Johan Hedberg0857dd32014-12-19 13:40:20 +020077
78 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
79 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
80 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
81
82 queue_work(hdev->workqueue, &hdev->cmd_work);
83
84 return 0;
85}
86
Johan Hedberge62144872015-04-02 13:41:08 +030087int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
88{
89 return req_run(req, complete, NULL);
90}
91
92int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
93{
94 return req_run(req, NULL, complete);
95}
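/* Example (added for illustration, not part of the original file): a typical
 * caller builds a request on the stack, queues one or more commands and then
 * submits the whole batch asynchronously.  The completion callback name is a
 * placeholder.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		BT_DBG("%s status 0x%2.2x opcode 0x%4.4x", hdev->name, status, opcode);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	if (hci_req_run(&req, example_complete) < 0)
 *		BT_DBG("nothing queued or request building failed");
 */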
96
Johan Hedbergbe91cd02015-11-10 09:44:54 +020097static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
98 struct sk_buff *skb)
99{
100 BT_DBG("%s result 0x%2.2x", hdev->name, result);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = result;
104 hdev->req_status = HCI_REQ_DONE;
105 if (skb)
106 hdev->req_skb = skb_get(skb);
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
Johan Hedbergb5044302015-11-10 09:44:55 +0200111void hci_req_sync_cancel(struct hci_dev *hdev, int err)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
123 const void *param, u8 event, u32 timeout)
124{
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200125 struct hci_request req;
126 struct sk_buff *skb;
127 int err = 0;
128
129 BT_DBG("%s", hdev->name);
130
131 hci_req_init(&req, hdev);
132
133 hci_req_add_ev(&req, opcode, plen, param, event);
134
135 hdev->req_status = HCI_REQ_PEND;
136
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200137 err = hci_req_run_skb(&req, hci_req_sync_complete);
John Keeping67d8cee2018-04-19 16:29:37 +0100138 if (err < 0)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200139 return ERR_PTR(err);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200140
John Keeping67d8cee2018-04-19 16:29:37 +0100141 err = wait_event_interruptible_timeout(hdev->req_wait_q,
142 hdev->req_status != HCI_REQ_PEND, timeout);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200143
John Keeping67d8cee2018-04-19 16:29:37 +0100144 if (err == -ERESTARTSYS)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200145 return ERR_PTR(-EINTR);
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_status = hdev->req_result = 0;
162 skb = hdev->req_skb;
163 hdev->req_skb = NULL;
164
165 BT_DBG("%s end: err %d", hdev->name, err);
166
167 if (err < 0) {
168 kfree_skb(skb);
169 return ERR_PTR(err);
170 }
171
172 if (!skb)
173 return ERR_PTR(-ENODATA);
174
175 return skb;
176}
177EXPORT_SYMBOL(__hci_cmd_sync_ev);
178
179struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
180 const void *param, u32 timeout)
181{
182 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
183}
184EXPORT_SYMBOL(__hci_cmd_sync);
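/* Example (added for illustration): a synchronous command is normally issued
 * with the request lock held; the returned skb holds the command response and
 * must be freed by the caller.
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_sync_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	hci_req_sync_unlock(hdev);
 *
 *	if (!IS_ERR(skb))
 *		kfree_skb(skb);
 */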
185
186/* Execute request and wait for completion. */
Johan Hedberga1d01db2015-11-11 08:11:25 +0200187int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
188 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200189 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200190{
191 struct hci_request req;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200192 int err = 0;
193
194 BT_DBG("%s start", hdev->name);
195
196 hci_req_init(&req, hdev);
197
198 hdev->req_status = HCI_REQ_PEND;
199
Johan Hedberga1d01db2015-11-11 08:11:25 +0200200 err = func(&req, opt);
201 if (err) {
202 if (hci_status)
203 *hci_status = HCI_ERROR_UNSPECIFIED;
204 return err;
205 }
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200206
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200207 err = hci_req_run_skb(&req, hci_req_sync_complete);
208 if (err < 0) {
209 hdev->req_status = 0;
210
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200211 /* ENODATA means the HCI request command queue is empty.
212 * This can happen when a request with conditionals doesn't
213 * trigger any commands to be sent. This is normal behavior
214 * and should not trigger an error return.
215 */
Johan Hedberg568f44f2015-11-23 14:40:47 +0200216 if (err == -ENODATA) {
217 if (hci_status)
218 *hci_status = 0;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200219 return 0;
Johan Hedberg568f44f2015-11-23 14:40:47 +0200220 }
221
222 if (hci_status)
223 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200224
225 return err;
226 }
227
John Keeping67d8cee2018-04-19 16:29:37 +0100228 err = wait_event_interruptible_timeout(hdev->req_wait_q,
229 hdev->req_status != HCI_REQ_PEND, timeout);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200230
John Keeping67d8cee2018-04-19 16:29:37 +0100231 if (err == -ERESTARTSYS)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200232 return -EINTR;
233
234 switch (hdev->req_status) {
235 case HCI_REQ_DONE:
236 err = -bt_to_errno(hdev->req_result);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200237 if (hci_status)
238 *hci_status = hdev->req_result;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200239 break;
240
241 case HCI_REQ_CANCELED:
242 err = -hdev->req_result;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200243 if (hci_status)
244 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200245 break;
246
247 default:
248 err = -ETIMEDOUT;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200249 if (hci_status)
250 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200251 break;
252 }
253
Frederic Dalleau9afee942016-08-23 07:59:19 +0200254 kfree_skb(hdev->req_skb);
255 hdev->req_skb = NULL;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200256 hdev->req_status = hdev->req_result = 0;
257
258 BT_DBG("%s end: err %d", hdev->name, err);
259
260 return err;
261}
262
Johan Hedberga1d01db2015-11-11 08:11:25 +0200263int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200265 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200266{
267 int ret;
268
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
272 /* Serialize all requests */
Johan Hedbergb5044302015-11-10 09:44:55 +0200273 hci_req_sync_lock(hdev);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
Johan Hedbergb5044302015-11-10 09:44:55 +0200275 hci_req_sync_unlock(hdev);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200276
277 return ret;
278}
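/* Example (added for illustration): hci_req_sync() takes a request-builder
 * callback; the builder queues commands on the temporary request and the
 * resulting HCI status is reported back.  The builder name is hypothetical.
 *
 *	static int example_build(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_scan(req);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, example_build, 0, HCI_CMD_TIMEOUT, &status);
 */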
279
Johan Hedberg0857dd32014-12-19 13:40:20 +0200280struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
281 const void *param)
282{
283 int len = HCI_COMMAND_HDR_SIZE + plen;
284 struct hci_command_hdr *hdr;
285 struct sk_buff *skb;
286
287 skb = bt_skb_alloc(len, GFP_ATOMIC);
288 if (!skb)
289 return NULL;
290
Johannes Berg4df864c2017-06-16 14:29:21 +0200291 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200292 hdr->opcode = cpu_to_le16(opcode);
293 hdr->plen = plen;
294
295 if (plen)
Johannes Berg59ae1d12017-06-16 14:29:20 +0200296 skb_put_data(skb, param, plen);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200297
298 BT_DBG("skb len %d", skb->len);
299
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100300 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
301 hci_skb_opcode(skb) = opcode;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200302
303 return skb;
304}
305
306/* Queue a command to an asynchronous HCI request */
307void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
308 const void *param, u8 event)
309{
310 struct hci_dev *hdev = req->hdev;
311 struct sk_buff *skb;
312
313 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
314
315 /* If an error occurred during request building, there is no point in
316 * queueing the HCI command. We can simply return.
317 */
318 if (req->err)
319 return;
320
321 skb = hci_prepare_cmd(hdev, opcode, plen, param);
322 if (!skb) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +0100323 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
324 opcode);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200325 req->err = -ENOMEM;
326 return;
327 }
328
329 if (skb_queue_empty(&req->cmd_q))
Johan Hedberg44d27132015-11-05 09:31:40 +0200330 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200331
Marcel Holtmann242c0eb2015-10-25 22:45:53 +0100332 bt_cb(skb)->hci.req_event = event;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200333
334 skb_queue_tail(&req->cmd_q, skb);
335}
336
337void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
338 const void *param)
339{
340 hci_req_add_ev(req, opcode, plen, param, 0);
341}
342
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200343void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
344{
345 struct hci_dev *hdev = req->hdev;
346 struct hci_cp_write_page_scan_activity acp;
347 u8 type;
348
349 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
350 return;
351
352 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
353 return;
354
355 if (enable) {
356 type = PAGE_SCAN_TYPE_INTERLACED;
357
358 /* 160 msec page scan interval */
359 acp.interval = cpu_to_le16(0x0100);
360 } else {
361 type = PAGE_SCAN_TYPE_STANDARD; /* default */
362
363 /* default 1.28 sec page scan */
364 acp.interval = cpu_to_le16(0x0800);
365 }
366
367 acp.window = cpu_to_le16(0x0012);
368
369 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
370 __cpu_to_le16(hdev->page_scan_window) != acp.window)
371 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
372 sizeof(acp), &acp);
373
374 if (hdev->page_scan_type != type)
375 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
376}
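/* Unit note (added for clarity): page scan interval and window are expressed
 * in baseband slots of 0.625 ms, so the values used above correspond to
 *
 *	0x0100 * 0.625 ms = 160 ms   (interlaced, fast connectable)
 *	0x0800 * 0.625 ms = 1280 ms  (standard, default)
 *	0x0012 * 0.625 ms = 11.25 ms (page scan window)
 */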
377
Johan Hedberg196a5e92015-11-22 18:55:44 +0200378/* This function controls the background scanning based on hdev->pend_le_conns
 379 * list. If there are pending LE connections we start the background scanning,
380 * otherwise we stop it.
381 *
382 * This function requires the caller holds hdev->lock.
383 */
384static void __hci_update_background_scan(struct hci_request *req)
385{
386 struct hci_dev *hdev = req->hdev;
387
388 if (!test_bit(HCI_UP, &hdev->flags) ||
389 test_bit(HCI_INIT, &hdev->flags) ||
390 hci_dev_test_flag(hdev, HCI_SETUP) ||
391 hci_dev_test_flag(hdev, HCI_CONFIG) ||
392 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
393 hci_dev_test_flag(hdev, HCI_UNREGISTER))
394 return;
395
396 /* No point in doing scanning if LE support hasn't been enabled */
397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
398 return;
399
400 /* If discovery is active don't interfere with it */
401 if (hdev->discovery.state != DISCOVERY_STOPPED)
402 return;
403
404 /* Reset RSSI and UUID filters when starting background scanning
405 * since these filters are meant for service discovery only.
406 *
407 * The Start Discovery and Start Service Discovery operations
 408 * make sure proper values are set for the RSSI threshold and UUID
409 * filter list. So it is safe to just reset them here.
410 */
411 hci_discovery_filter_clear(hdev);
412
413 if (list_empty(&hdev->pend_le_conns) &&
414 list_empty(&hdev->pend_le_reports)) {
 415 /* If there are no pending LE connections or devices
416 * to be scanned for, we should stop the background
417 * scanning.
418 */
419
420 /* If controller is not scanning we are done. */
421 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
422 return;
423
424 hci_req_add_le_scan_disable(req);
425
426 BT_DBG("%s stopping background scanning", hdev->name);
427 } else {
428 /* If there is at least one pending LE connection, we should
429 * keep the background scan running.
430 */
431
432 /* If controller is connecting, we should not start scanning
433 * since some controllers are not able to scan and connect at
434 * the same time.
435 */
436 if (hci_lookup_le_connect(hdev))
437 return;
438
439 /* If controller is currently scanning, we stop it to ensure we
440 * don't miss any advertising (due to duplicates filter).
441 */
442 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
443 hci_req_add_le_scan_disable(req);
444
445 hci_req_add_le_passive_scan(req);
446
447 BT_DBG("%s starting background scanning", hdev->name);
448 }
449}
450
Johan Hedberg00cf5042015-11-25 16:15:41 +0200451void __hci_req_update_name(struct hci_request *req)
452{
453 struct hci_dev *hdev = req->hdev;
454 struct hci_cp_write_local_name cp;
455
456 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
457
458 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
459}
460
Johan Hedbergb1a89172015-11-25 16:15:42 +0200461#define PNP_INFO_SVCLASS_ID 0x1200
462
463static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
464{
465 u8 *ptr = data, *uuids_start = NULL;
466 struct bt_uuid *uuid;
467
468 if (len < 4)
469 return ptr;
470
471 list_for_each_entry(uuid, &hdev->uuids, list) {
472 u16 uuid16;
473
474 if (uuid->size != 16)
475 continue;
476
477 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
478 if (uuid16 < 0x1100)
479 continue;
480
481 if (uuid16 == PNP_INFO_SVCLASS_ID)
482 continue;
483
484 if (!uuids_start) {
485 uuids_start = ptr;
486 uuids_start[0] = 1;
487 uuids_start[1] = EIR_UUID16_ALL;
488 ptr += 2;
489 }
490
491 /* Stop if not enough space to put next UUID */
492 if ((ptr - data) + sizeof(u16) > len) {
493 uuids_start[1] = EIR_UUID16_SOME;
494 break;
495 }
496
497 *ptr++ = (uuid16 & 0x00ff);
498 *ptr++ = (uuid16 & 0xff00) >> 8;
499 uuids_start[0] += sizeof(uuid16);
500 }
501
502 return ptr;
503}
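/* Example (added for clarity): a single 16-bit UUID such as 0x110b is encoded
 * above as the EIR structure 0x03 0x03 0x0b 0x11, i.e. length 3, type
 * EIR_UUID16_ALL, followed by the UUID in little-endian byte order.
 */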
504
505static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
506{
507 u8 *ptr = data, *uuids_start = NULL;
508 struct bt_uuid *uuid;
509
510 if (len < 6)
511 return ptr;
512
513 list_for_each_entry(uuid, &hdev->uuids, list) {
514 if (uuid->size != 32)
515 continue;
516
517 if (!uuids_start) {
518 uuids_start = ptr;
519 uuids_start[0] = 1;
520 uuids_start[1] = EIR_UUID32_ALL;
521 ptr += 2;
522 }
523
524 /* Stop if not enough space to put next UUID */
525 if ((ptr - data) + sizeof(u32) > len) {
526 uuids_start[1] = EIR_UUID32_SOME;
527 break;
528 }
529
530 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
531 ptr += sizeof(u32);
532 uuids_start[0] += sizeof(u32);
533 }
534
535 return ptr;
536}
537
538static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
539{
540 u8 *ptr = data, *uuids_start = NULL;
541 struct bt_uuid *uuid;
542
543 if (len < 18)
544 return ptr;
545
546 list_for_each_entry(uuid, &hdev->uuids, list) {
547 if (uuid->size != 128)
548 continue;
549
550 if (!uuids_start) {
551 uuids_start = ptr;
552 uuids_start[0] = 1;
553 uuids_start[1] = EIR_UUID128_ALL;
554 ptr += 2;
555 }
556
557 /* Stop if not enough space to put next UUID */
558 if ((ptr - data) + 16 > len) {
559 uuids_start[1] = EIR_UUID128_SOME;
560 break;
561 }
562
563 memcpy(ptr, uuid->uuid, 16);
564 ptr += 16;
565 uuids_start[0] += 16;
566 }
567
568 return ptr;
569}
570
571static void create_eir(struct hci_dev *hdev, u8 *data)
572{
573 u8 *ptr = data;
574 size_t name_len;
575
576 name_len = strlen(hdev->dev_name);
577
578 if (name_len > 0) {
579 /* EIR Data type */
580 if (name_len > 48) {
581 name_len = 48;
582 ptr[1] = EIR_NAME_SHORT;
583 } else
584 ptr[1] = EIR_NAME_COMPLETE;
585
586 /* EIR Data length */
587 ptr[0] = name_len + 1;
588
589 memcpy(ptr + 2, hdev->dev_name, name_len);
590
591 ptr += (name_len + 2);
592 }
593
594 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
595 ptr[0] = 2;
596 ptr[1] = EIR_TX_POWER;
597 ptr[2] = (u8) hdev->inq_tx_power;
598
599 ptr += 3;
600 }
601
602 if (hdev->devid_source > 0) {
603 ptr[0] = 9;
604 ptr[1] = EIR_DEVICE_ID;
605
606 put_unaligned_le16(hdev->devid_source, ptr + 2);
607 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
608 put_unaligned_le16(hdev->devid_product, ptr + 6);
609 put_unaligned_le16(hdev->devid_version, ptr + 8);
610
611 ptr += 10;
612 }
613
614 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
615 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
616 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
617}
618
619void __hci_req_update_eir(struct hci_request *req)
620{
621 struct hci_dev *hdev = req->hdev;
622 struct hci_cp_write_eir cp;
623
624 if (!hdev_is_powered(hdev))
625 return;
626
627 if (!lmp_ext_inq_capable(hdev))
628 return;
629
630 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
631 return;
632
633 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
634 return;
635
636 memset(&cp, 0, sizeof(cp));
637
638 create_eir(hdev, cp.data);
639
640 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
641 return;
642
643 memcpy(hdev->eir, cp.data, sizeof(cp.data));
644
645 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
646}
647
Johan Hedberg0857dd32014-12-19 13:40:20 +0200648void hci_req_add_le_scan_disable(struct hci_request *req)
649{
650 struct hci_cp_le_set_scan_enable cp;
651
652 memset(&cp, 0, sizeof(cp));
653 cp.enable = LE_SCAN_DISABLE;
654 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
655}
656
657static void add_to_white_list(struct hci_request *req,
658 struct hci_conn_params *params)
659{
660 struct hci_cp_le_add_to_white_list cp;
661
662 cp.bdaddr_type = params->addr_type;
663 bacpy(&cp.bdaddr, &params->addr);
664
665 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
666}
667
668static u8 update_white_list(struct hci_request *req)
669{
670 struct hci_dev *hdev = req->hdev;
671 struct hci_conn_params *params;
672 struct bdaddr_list *b;
673 uint8_t white_list_entries = 0;
674
675 /* Go through the current white list programmed into the
676 * controller one by one and check if that address is still
677 * in the list of pending connections or list of devices to
678 * report. If not present in either list, then queue the
679 * command to remove it from the controller.
680 */
681 list_for_each_entry(b, &hdev->le_white_list, list) {
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500682 /* If the device is neither in pend_le_conns nor
683 * pend_le_reports then remove it from the whitelist.
684 */
685 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
686 &b->bdaddr, b->bdaddr_type) &&
687 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
688 &b->bdaddr, b->bdaddr_type)) {
689 struct hci_cp_le_del_from_white_list cp;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200690
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500691 cp.bdaddr_type = b->bdaddr_type;
692 bacpy(&cp.bdaddr, &b->bdaddr);
693
694 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
695 sizeof(cp), &cp);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200696 continue;
697 }
698
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500699 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
700 /* White list can not be used with RPAs */
701 return 0x00;
702 }
Johan Hedberg0857dd32014-12-19 13:40:20 +0200703
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500704 white_list_entries++;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200705 }
706
 707 /* Since all white list entries that are no longer valid have been
708 * removed, walk through the list of pending connections
709 * and ensure that any new device gets programmed into
710 * the controller.
711 *
 712 * If the list of devices is larger than the number of
 713 * available white list entries in the controller, then
 714 * just abort and return a filter policy value that does not use the
715 * white list.
716 */
717 list_for_each_entry(params, &hdev->pend_le_conns, action) {
718 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
719 &params->addr, params->addr_type))
720 continue;
721
722 if (white_list_entries >= hdev->le_white_list_size) {
723 /* Select filter policy to accept all advertising */
724 return 0x00;
725 }
726
727 if (hci_find_irk_by_addr(hdev, &params->addr,
728 params->addr_type)) {
729 /* White list can not be used with RPAs */
730 return 0x00;
731 }
732
733 white_list_entries++;
734 add_to_white_list(req, params);
735 }
736
737 /* After adding all new pending connections, walk through
738 * the list of pending reports and also add these to the
739 * white list if there is still space.
740 */
741 list_for_each_entry(params, &hdev->pend_le_reports, action) {
742 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
743 &params->addr, params->addr_type))
744 continue;
745
746 if (white_list_entries >= hdev->le_white_list_size) {
747 /* Select filter policy to accept all advertising */
748 return 0x00;
749 }
750
751 if (hci_find_irk_by_addr(hdev, &params->addr,
752 params->addr_type)) {
753 /* White list can not be used with RPAs */
754 return 0x00;
755 }
756
757 white_list_entries++;
758 add_to_white_list(req, params);
759 }
760
761 /* Select filter policy to use white list */
762 return 0x01;
763}
764
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200765static bool scan_use_rpa(struct hci_dev *hdev)
766{
767 return hci_dev_test_flag(hdev, HCI_PRIVACY);
768}
769
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530770static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
771 u16 window, u8 own_addr_type, u8 filter_policy)
Johan Hedberg0857dd32014-12-19 13:40:20 +0200772{
773 struct hci_cp_le_set_scan_param param_cp;
774 struct hci_cp_le_set_scan_enable enable_cp;
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530775
776 memset(&param_cp, 0, sizeof(param_cp));
777 param_cp.type = type;
778 param_cp.interval = cpu_to_le16(interval);
779 param_cp.window = cpu_to_le16(window);
780 param_cp.own_address_type = own_addr_type;
781 param_cp.filter_policy = filter_policy;
782 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
783 &param_cp);
784
785 memset(&enable_cp, 0, sizeof(enable_cp));
786 enable_cp.enable = LE_SCAN_ENABLE;
787 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
788 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
789 &enable_cp);
790}
791
792void hci_req_add_le_passive_scan(struct hci_request *req)
793{
Johan Hedberg0857dd32014-12-19 13:40:20 +0200794 struct hci_dev *hdev = req->hdev;
795 u8 own_addr_type;
796 u8 filter_policy;
797
 798 /* Set require_privacy to false since no SCAN_REQ are sent
 799 * during passive scanning. Not using a non-resolvable address
800 * here is important so that peer devices using direct
801 * advertising with our address will be correctly reported
802 * by the controller.
803 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200804 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
805 &own_addr_type))
Johan Hedberg0857dd32014-12-19 13:40:20 +0200806 return;
807
808 /* Adding or removing entries from the white list must
809 * happen before enabling scanning. The controller does
810 * not allow white list modification while scanning.
811 */
812 filter_policy = update_white_list(req);
813
814 /* When the controller is using random resolvable addresses and
 815 * thus has LE privacy enabled, controllers that support the
 816 * Extended Scanner Filter Policies can additionally handle
 817 * directed advertising.
818 *
 819 * So instead of using filter policies 0x00 (no whitelist)
820 * and 0x01 (whitelist enabled) use the new filter policies
821 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
822 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700823 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +0200824 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
825 filter_policy |= 0x02;
826
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530827 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
828 hdev->le_scan_window, own_addr_type, filter_policy);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200829}
830
Johan Hedbergf2252572015-11-18 12:49:20 +0200831static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
832{
Johan Hedbergcab054a2015-11-30 11:21:45 +0200833 u8 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +0200834 struct adv_info *adv_instance;
835
836 /* Ignore instance 0 */
837 if (instance == 0x00)
838 return 0;
839
840 adv_instance = hci_find_adv_instance(hdev, instance);
841 if (!adv_instance)
842 return 0;
843
844 /* TODO: Take into account the "appearance" and "local-name" flags here.
845 * These are currently being ignored as they are not supported.
846 */
847 return adv_instance->scan_rsp_len;
848}
849
850void __hci_req_disable_advertising(struct hci_request *req)
851{
852 u8 enable = 0x00;
853
854 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
855}
856
857static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
858{
859 u32 flags;
860 struct adv_info *adv_instance;
861
862 if (instance == 0x00) {
863 /* Instance 0 always manages the "Tx Power" and "Flags"
864 * fields
865 */
866 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
867
868 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
869 * corresponds to the "connectable" instance flag.
870 */
871 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
872 flags |= MGMT_ADV_FLAG_CONNECTABLE;
873
Johan Hedberg6a19cc82016-03-11 09:56:32 +0200874 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
875 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
876 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedbergd43efbd2016-03-09 17:30:33 +0200877 flags |= MGMT_ADV_FLAG_DISCOV;
878
Johan Hedbergf2252572015-11-18 12:49:20 +0200879 return flags;
880 }
881
882 adv_instance = hci_find_adv_instance(hdev, instance);
883
884 /* Return 0 when we got an invalid instance identifier. */
885 if (!adv_instance)
886 return 0;
887
888 return adv_instance->flags;
889}
890
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200891static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
892{
893 /* If privacy is not enabled don't use RPA */
894 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
895 return false;
896
897 /* If basic privacy mode is enabled use RPA */
898 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
899 return true;
900
901 /* If limited privacy mode is enabled don't use RPA if we're
902 * both discoverable and bondable.
903 */
904 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
905 hci_dev_test_flag(hdev, HCI_BONDABLE))
906 return false;
907
908 /* We're neither bondable nor discoverable in the limited
909 * privacy mode, therefore use RPA.
910 */
911 return true;
912}
913
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100914static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
915{
916 /* If there is no connection we are OK to advertise. */
917 if (hci_conn_num(hdev, LE_LINK) == 0)
918 return true;
919
920 /* Check le_states if there is any connection in slave role. */
921 if (hdev->conn_hash.le_num_slave > 0) {
922 /* Slave connection state and non connectable mode bit 20. */
923 if (!connectable && !(hdev->le_states[2] & 0x10))
924 return false;
925
926 /* Slave connection state and connectable mode bit 38
927 * and scannable bit 21.
928 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +0100929 if (connectable && (!(hdev->le_states[4] & 0x40) ||
930 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100931 return false;
932 }
933
934 /* Check le_states if there is any connection in master role. */
935 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
936 /* Master connection state and non connectable mode bit 18. */
937 if (!connectable && !(hdev->le_states[2] & 0x02))
938 return false;
939
940 /* Master connection state and connectable mode bit 35 and
941 * scannable 19.
942 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +0100943 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100944 !(hdev->le_states[2] & 0x08)))
945 return false;
946 }
947
948 return true;
949}
950
Johan Hedbergf2252572015-11-18 12:49:20 +0200951void __hci_req_enable_advertising(struct hci_request *req)
952{
953 struct hci_dev *hdev = req->hdev;
954 struct hci_cp_le_set_adv_param cp;
955 u8 own_addr_type, enable = 0x01;
956 bool connectable;
Johan Hedbergf2252572015-11-18 12:49:20 +0200957 u32 flags;
958
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100959 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
960
961 /* If the "connectable" instance flag was not set, then choose between
962 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
963 */
964 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
965 mgmt_get_connectable(hdev);
966
967 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +0200968 return;
969
970 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
971 __hci_req_disable_advertising(req);
972
973 /* Clear the HCI_LE_ADV bit temporarily so that the
974 * hci_update_random_address knows that it's safe to go ahead
975 * and write a new random address. The flag will be set back on
976 * as soon as the SET_ADV_ENABLE HCI command completes.
977 */
978 hci_dev_clear_flag(hdev, HCI_LE_ADV);
979
Johan Hedbergf2252572015-11-18 12:49:20 +0200980 /* Set require_privacy to true only when non-connectable
981 * advertising is used. In that case it is fine to use a
982 * non-resolvable private address.
983 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200984 if (hci_update_random_address(req, !connectable,
985 adv_use_rpa(hdev, flags),
986 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +0200987 return;
988
989 memset(&cp, 0, sizeof(cp));
990 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
991 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
992
993 if (connectable)
994 cp.type = LE_ADV_IND;
995 else if (get_cur_adv_instance_scan_rsp_len(hdev))
996 cp.type = LE_ADV_SCAN_IND;
997 else
998 cp.type = LE_ADV_NONCONN_IND;
999
1000 cp.own_address_type = own_addr_type;
1001 cp.channel_map = hdev->le_adv_channel_map;
1002
1003 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1004
1005 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1006}
1007
Michał Narajowskif61851f2016-10-19 10:20:27 +02001008u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001009{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001010 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001011 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001012
Michał Narajowskif61851f2016-10-19 10:20:27 +02001013 /* no space left for name (+ NULL + type + len) */
1014 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1015 return ad_len;
1016
1017 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001018 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001019 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001020 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001021 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001022
Michał Narajowskif61851f2016-10-19 10:20:27 +02001023 /* use short name if present */
1024 short_len = strlen(hdev->short_name);
1025 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001026 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001027 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001028
Michał Narajowskif61851f2016-10-19 10:20:27 +02001029 /* use shortened full name if present, we already know that name
 1030 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1031 */
1032 if (complete_len) {
1033 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1034
1035 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1036 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1037
1038 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1039 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001040 }
1041
1042 return ad_len;
1043}
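/* Note (added for clarity): if the complete name does not fit, a configured
 * short name is preferred; otherwise the complete name is truncated to
 * HCI_MAX_SHORT_NAME_LENGTH bytes and advertised as EIR_NAME_SHORT.
 */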
1044
Michał Narajowski1b422062016-10-05 12:28:27 +02001045static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1046{
1047 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1048}
1049
Michał Narajowski7c295c42016-09-18 12:50:02 +02001050static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1051{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001052 u8 scan_rsp_len = 0;
1053
1054 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001055 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001056 }
1057
Michał Narajowski1b422062016-10-05 12:28:27 +02001058 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001059}
1060
Johan Hedbergf2252572015-11-18 12:49:20 +02001061static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1062 u8 *ptr)
1063{
1064 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001065 u32 instance_flags;
1066 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001067
1068 adv_instance = hci_find_adv_instance(hdev, instance);
1069 if (!adv_instance)
1070 return 0;
1071
Michał Narajowski7c295c42016-09-18 12:50:02 +02001072 instance_flags = adv_instance->flags;
1073
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001074 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001075 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001076 }
1077
Michał Narajowski1b422062016-10-05 12:28:27 +02001078 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001079 adv_instance->scan_rsp_len);
1080
Michał Narajowski7c295c42016-09-18 12:50:02 +02001081 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001082
1083 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1084 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1085
1086 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001087}
1088
Johan Hedbergcab054a2015-11-30 11:21:45 +02001089void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001090{
1091 struct hci_dev *hdev = req->hdev;
1092 struct hci_cp_le_set_scan_rsp_data cp;
1093 u8 len;
1094
1095 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1096 return;
1097
1098 memset(&cp, 0, sizeof(cp));
1099
1100 if (instance)
1101 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1102 else
1103 len = create_default_scan_rsp_data(hdev, cp.data);
1104
1105 if (hdev->scan_rsp_data_len == len &&
1106 !memcmp(cp.data, hdev->scan_rsp_data, len))
1107 return;
1108
1109 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1110 hdev->scan_rsp_data_len = len;
1111
1112 cp.length = len;
1113
1114 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1115}
1116
Johan Hedbergf2252572015-11-18 12:49:20 +02001117static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1118{
1119 struct adv_info *adv_instance = NULL;
1120 u8 ad_len = 0, flags = 0;
1121 u32 instance_flags;
1122
1123 /* Return 0 when the current instance identifier is invalid. */
1124 if (instance) {
1125 adv_instance = hci_find_adv_instance(hdev, instance);
1126 if (!adv_instance)
1127 return 0;
1128 }
1129
1130 instance_flags = get_adv_instance_flags(hdev, instance);
1131
1132 /* The Add Advertising command allows userspace to set both the general
1133 * and limited discoverable flags.
1134 */
1135 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1136 flags |= LE_AD_GENERAL;
1137
1138 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1139 flags |= LE_AD_LIMITED;
1140
Johan Hedbergf18ba582016-04-06 13:09:05 +03001141 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1142 flags |= LE_AD_NO_BREDR;
1143
Johan Hedbergf2252572015-11-18 12:49:20 +02001144 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1145 /* If a discovery flag wasn't provided, simply use the global
1146 * settings.
1147 */
1148 if (!flags)
1149 flags |= mgmt_get_adv_discov_flags(hdev);
1150
Johan Hedbergf2252572015-11-18 12:49:20 +02001151 /* If flags would still be empty, then there is no need to
 1152 * include the "Flags" AD field.
1153 */
1154 if (flags) {
1155 ptr[0] = 0x02;
1156 ptr[1] = EIR_FLAGS;
1157 ptr[2] = flags;
1158
1159 ad_len += 3;
1160 ptr += 3;
1161 }
1162 }
1163
1164 if (adv_instance) {
1165 memcpy(ptr, adv_instance->adv_data,
1166 adv_instance->adv_data_len);
1167 ad_len += adv_instance->adv_data_len;
1168 ptr += adv_instance->adv_data_len;
1169 }
1170
1171 /* Provide Tx Power only if we can provide a valid value for it */
1172 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1173 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1174 ptr[0] = 0x02;
1175 ptr[1] = EIR_TX_POWER;
1176 ptr[2] = (u8)hdev->adv_tx_power;
1177
1178 ad_len += 3;
1179 ptr += 3;
1180 }
1181
1182 return ad_len;
1183}
1184
Johan Hedbergcab054a2015-11-30 11:21:45 +02001185void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001186{
1187 struct hci_dev *hdev = req->hdev;
1188 struct hci_cp_le_set_adv_data cp;
1189 u8 len;
1190
1191 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1192 return;
1193
1194 memset(&cp, 0, sizeof(cp));
1195
1196 len = create_instance_adv_data(hdev, instance, cp.data);
1197
1198 /* There's nothing to do if the data hasn't changed */
1199 if (hdev->adv_data_len == len &&
1200 memcmp(cp.data, hdev->adv_data, len) == 0)
1201 return;
1202
1203 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1204 hdev->adv_data_len = len;
1205
1206 cp.length = len;
1207
1208 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1209}
1210
Johan Hedbergcab054a2015-11-30 11:21:45 +02001211int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001212{
1213 struct hci_request req;
1214
1215 hci_req_init(&req, hdev);
1216 __hci_req_update_adv_data(&req, instance);
1217
1218 return hci_req_run(&req, NULL);
1219}
1220
1221static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1222{
1223 BT_DBG("%s status %u", hdev->name, status);
1224}
1225
1226void hci_req_reenable_advertising(struct hci_dev *hdev)
1227{
1228 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001229
1230 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001231 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001232 return;
1233
Johan Hedbergf2252572015-11-18 12:49:20 +02001234 hci_req_init(&req, hdev);
1235
Johan Hedbergcab054a2015-11-30 11:21:45 +02001236 if (hdev->cur_adv_instance) {
1237 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1238 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001239 } else {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001240 __hci_req_update_adv_data(&req, 0x00);
1241 __hci_req_update_scan_rsp_data(&req, 0x00);
Johan Hedbergf2252572015-11-18 12:49:20 +02001242 __hci_req_enable_advertising(&req);
1243 }
1244
1245 hci_req_run(&req, adv_enable_complete);
1246}
1247
1248static void adv_timeout_expire(struct work_struct *work)
1249{
1250 struct hci_dev *hdev = container_of(work, struct hci_dev,
1251 adv_instance_expire.work);
1252
1253 struct hci_request req;
1254 u8 instance;
1255
1256 BT_DBG("%s", hdev->name);
1257
1258 hci_dev_lock(hdev);
1259
1260 hdev->adv_instance_timeout = 0;
1261
Johan Hedbergcab054a2015-11-30 11:21:45 +02001262 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001263 if (instance == 0x00)
1264 goto unlock;
1265
1266 hci_req_init(&req, hdev);
1267
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001268 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001269
1270 if (list_empty(&hdev->adv_instances))
1271 __hci_req_disable_advertising(&req);
1272
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001273 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001274
1275unlock:
1276 hci_dev_unlock(hdev);
1277}
1278
1279int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1280 bool force)
1281{
1282 struct hci_dev *hdev = req->hdev;
1283 struct adv_info *adv_instance = NULL;
1284 u16 timeout;
1285
1286 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001287 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001288 return -EPERM;
1289
1290 if (hdev->adv_instance_timeout)
1291 return -EBUSY;
1292
1293 adv_instance = hci_find_adv_instance(hdev, instance);
1294 if (!adv_instance)
1295 return -ENOENT;
1296
1297 /* A zero timeout means unlimited advertising. As long as there is
1298 * only one instance, duration should be ignored. We still set a timeout
1299 * in case further instances are being added later on.
1300 *
1301 * If the remaining lifetime of the instance is more than the duration
1302 * then the timeout corresponds to the duration, otherwise it will be
1303 * reduced to the remaining instance lifetime.
1304 */
1305 if (adv_instance->timeout == 0 ||
1306 adv_instance->duration <= adv_instance->remaining_time)
1307 timeout = adv_instance->duration;
1308 else
1309 timeout = adv_instance->remaining_time;
1310
1311 /* The remaining time is being reduced unless the instance is being
1312 * advertised without time limit.
1313 */
1314 if (adv_instance->timeout)
1315 adv_instance->remaining_time =
1316 adv_instance->remaining_time - timeout;
1317
1318 hdev->adv_instance_timeout = timeout;
1319 queue_delayed_work(hdev->req_workqueue,
1320 &hdev->adv_instance_expire,
1321 msecs_to_jiffies(timeout * 1000));
1322
1323 /* If we're just re-scheduling the same instance again then do not
1324 * execute any HCI commands. This happens when a single instance is
1325 * being advertised.
1326 */
1327 if (!force && hdev->cur_adv_instance == instance &&
1328 hci_dev_test_flag(hdev, HCI_LE_ADV))
1329 return 0;
1330
1331 hdev->cur_adv_instance = instance;
Johan Hedbergcab054a2015-11-30 11:21:45 +02001332 __hci_req_update_adv_data(req, instance);
1333 __hci_req_update_scan_rsp_data(req, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001334 __hci_req_enable_advertising(req);
1335
1336 return 0;
1337}
1338
1339static void cancel_adv_timeout(struct hci_dev *hdev)
1340{
1341 if (hdev->adv_instance_timeout) {
1342 hdev->adv_instance_timeout = 0;
1343 cancel_delayed_work(&hdev->adv_instance_expire);
1344 }
1345}
1346
1347/* For a single instance:
1348 * - force == true: The instance will be removed even when its remaining
1349 * lifetime is not zero.
1350 * - force == false: the instance will be deactivated but kept stored unless
1351 * the remaining lifetime is zero.
1352 *
1353 * For instance == 0x00:
1354 * - force == true: All instances will be removed regardless of their timeout
1355 * setting.
1356 * - force == false: Only instances that have a timeout will be removed.
1357 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001358void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1359 struct hci_request *req, u8 instance,
1360 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02001361{
1362 struct adv_info *adv_instance, *n, *next_instance = NULL;
1363 int err;
1364 u8 rem_inst;
1365
1366 /* Cancel any timeout concerning the removed instance(s). */
1367 if (!instance || hdev->cur_adv_instance == instance)
1368 cancel_adv_timeout(hdev);
1369
1370 /* Get the next instance to advertise BEFORE we remove
1371 * the current one. This can be the same instance again
1372 * if there is only one instance.
1373 */
1374 if (instance && hdev->cur_adv_instance == instance)
1375 next_instance = hci_get_next_instance(hdev, instance);
1376
1377 if (instance == 0x00) {
1378 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1379 list) {
1380 if (!(force || adv_instance->timeout))
1381 continue;
1382
1383 rem_inst = adv_instance->instance;
1384 err = hci_remove_adv_instance(hdev, rem_inst);
1385 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001386 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02001387 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001388 } else {
1389 adv_instance = hci_find_adv_instance(hdev, instance);
1390
1391 if (force || (adv_instance && adv_instance->timeout &&
1392 !adv_instance->remaining_time)) {
1393 /* Don't advertise a removed instance. */
1394 if (next_instance &&
1395 next_instance->instance == instance)
1396 next_instance = NULL;
1397
1398 err = hci_remove_adv_instance(hdev, instance);
1399 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001400 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001401 }
1402 }
1403
Johan Hedbergf2252572015-11-18 12:49:20 +02001404 if (!req || !hdev_is_powered(hdev) ||
1405 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1406 return;
1407
1408 if (next_instance)
1409 __hci_req_schedule_adv_instance(req, next_instance->instance,
1410 false);
1411}
1412
Johan Hedberg0857dd32014-12-19 13:40:20 +02001413static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1414{
1415 struct hci_dev *hdev = req->hdev;
1416
1417 /* If we're advertising or initiating an LE connection we can't
1418 * go ahead and change the random address at this time. This is
1419 * because the eventual initiator address used for the
1420 * subsequently created connection will be undefined (some
1421 * controllers use the new address and others the one we had
1422 * when the operation started).
1423 *
1424 * In this kind of scenario skip the update and let the random
1425 * address be updated at the next cycle.
1426 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001427 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02001428 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001429 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001430 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001431 return;
1432 }
1433
1434 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1435}
1436
1437int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001438 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001439{
1440 struct hci_dev *hdev = req->hdev;
1441 int err;
1442
1443 /* If privacy is enabled use a resolvable private address. If
1444 * current RPA has expired or there is something else than
1445 * the current RPA in use, then generate a new one.
1446 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001447 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001448 int to;
1449
1450 *own_addr_type = ADDR_LE_DEV_RANDOM;
1451
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001452 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001453 !bacmp(&hdev->random_addr, &hdev->rpa))
1454 return 0;
1455
1456 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1457 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001458 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001459 return err;
1460 }
1461
1462 set_random_addr(req, &hdev->rpa);
1463
1464 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1465 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1466
1467 return 0;
1468 }
1469
 1470 /* In case of required privacy without a resolvable private address,
 1471 * use a non-resolvable private address. This is useful for active
1472 * scanning and non-connectable advertising.
1473 */
1474 if (require_privacy) {
1475 bdaddr_t nrpa;
1476
1477 while (true) {
1478 /* The non-resolvable private address is generated
 1479 * from six random bytes with the two most significant
1480 * bits cleared.
1481 */
1482 get_random_bytes(&nrpa, 6);
1483 nrpa.b[5] &= 0x3f;
1484
1485 /* The non-resolvable private address shall not be
1486 * equal to the public address.
1487 */
1488 if (bacmp(&hdev->bdaddr, &nrpa))
1489 break;
1490 }
1491
1492 *own_addr_type = ADDR_LE_DEV_RANDOM;
1493 set_random_addr(req, &nrpa);
1494 return 0;
1495 }
1496
1497 /* If forcing static address is in use or there is no public
 1498 * address, use the static address as the random address (but skip
 1499 * the HCI command if the current random address is already the
 1500 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001501 *
1502 * In case BR/EDR has been disabled on a dual-mode controller
1503 * and a static address has been configured, then use that
1504 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001505 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001506 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001507 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001508 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001509 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001510 *own_addr_type = ADDR_LE_DEV_RANDOM;
1511 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1512 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1513 &hdev->static_addr);
1514 return 0;
1515 }
1516
1517 /* Neither privacy nor static address is being used so use a
1518 * public address.
1519 */
1520 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1521
1522 return 0;
1523}
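/* Decision summary (added for clarity):
 *	use_rpa                          -> ADDR_LE_DEV_RANDOM, (re)generated RPA
 *	require_privacy without RPA      -> ADDR_LE_DEV_RANDOM, fresh NRPA
 *	static address forced/configured -> ADDR_LE_DEV_RANDOM, static address
 *	otherwise                        -> ADDR_LE_DEV_PUBLIC
 */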
Johan Hedberg2cf22212014-12-19 22:26:00 +02001524
Johan Hedberg405a2612014-12-19 23:18:22 +02001525static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1526{
1527 struct bdaddr_list *b;
1528
1529 list_for_each_entry(b, &hdev->whitelist, list) {
1530 struct hci_conn *conn;
1531
1532 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1533 if (!conn)
1534 return true;
1535
1536 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1537 return true;
1538 }
1539
1540 return false;
1541}
1542
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001543void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001544{
1545 struct hci_dev *hdev = req->hdev;
1546 u8 scan;
1547
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001548 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001549 return;
1550
1551 if (!hdev_is_powered(hdev))
1552 return;
1553
1554 if (mgmt_powering_down(hdev))
1555 return;
1556
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001557 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001558 disconnected_whitelist_entries(hdev))
1559 scan = SCAN_PAGE;
1560 else
1561 scan = SCAN_DISABLED;
1562
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001563 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001564 scan |= SCAN_INQUIRY;
1565
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001566 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1567 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1568 return;
1569
Johan Hedberg405a2612014-12-19 23:18:22 +02001570 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1571}
1572
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001573static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001574{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001575 hci_dev_lock(req->hdev);
1576 __hci_req_update_scan(req);
1577 hci_dev_unlock(req->hdev);
1578 return 0;
1579}
Johan Hedberg405a2612014-12-19 23:18:22 +02001580
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001581static void scan_update_work(struct work_struct *work)
1582{
1583 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1584
1585 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001586}
1587
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001588static int connectable_update(struct hci_request *req, unsigned long opt)
1589{
1590 struct hci_dev *hdev = req->hdev;
1591
1592 hci_dev_lock(hdev);
1593
1594 __hci_req_update_scan(req);
1595
1596 /* If BR/EDR is not enabled and we disable advertising as a
1597 * by-product of disabling connectable, we need to update the
1598 * advertising flags.
1599 */
1600 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001601 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001602
1603 /* Update the advertising parameters if necessary */
1604 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001605 !list_empty(&hdev->adv_instances))
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001606 __hci_req_enable_advertising(req);
1607
1608 __hci_update_background_scan(req);
1609
1610 hci_dev_unlock(hdev);
1611
1612 return 0;
1613}
1614
1615static void connectable_update_work(struct work_struct *work)
1616{
1617 struct hci_dev *hdev = container_of(work, struct hci_dev,
1618 connectable_update);
1619 u8 status;
1620
1621 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1622 mgmt_set_connectable_complete(hdev, status);
1623}
1624
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001625static u8 get_service_classes(struct hci_dev *hdev)
1626{
1627 struct bt_uuid *uuid;
1628 u8 val = 0;
1629
1630 list_for_each_entry(uuid, &hdev->uuids, list)
1631 val |= uuid->svc_hint;
1632
1633 return val;
1634}
1635
1636void __hci_req_update_class(struct hci_request *req)
1637{
1638 struct hci_dev *hdev = req->hdev;
1639 u8 cod[3];
1640
1641 BT_DBG("%s", hdev->name);
1642
1643 if (!hdev_is_powered(hdev))
1644 return;
1645
1646 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1647 return;
1648
1649 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1650 return;
1651
1652 cod[0] = hdev->minor_class;
1653 cod[1] = hdev->major_class;
1654 cod[2] = get_service_classes(hdev);
1655
1656 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1657 cod[1] |= 0x20;
1658
1659 if (memcmp(cod, hdev->dev_class, 3) == 0)
1660 return;
1661
1662 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1663}
1664
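/* Queue a Write Current IAC LAP command: in limited discoverable mode the
 * LIAC (and the GIAC when the controller supports two IACs) is used, in
 * general discoverable mode only the GIAC.
 */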
Johan Hedbergaed1a882015-11-22 17:24:44 +03001665static void write_iac(struct hci_request *req)
1666{
1667 struct hci_dev *hdev = req->hdev;
1668 struct hci_cp_write_current_iac_lap cp;
1669
1670 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1671 return;
1672
1673 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1674 /* Limited discoverable mode */
1675 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1676 cp.iac_lap[0] = 0x00; /* LIAC */
1677 cp.iac_lap[1] = 0x8b;
1678 cp.iac_lap[2] = 0x9e;
1679 cp.iac_lap[3] = 0x33; /* GIAC */
1680 cp.iac_lap[4] = 0x8b;
1681 cp.iac_lap[5] = 0x9e;
1682 } else {
1683 /* General discoverable mode */
1684 cp.num_iac = 1;
1685 cp.iac_lap[0] = 0x33; /* GIAC */
1686 cp.iac_lap[1] = 0x8b;
1687 cp.iac_lap[2] = 0x9e;
1688 }
1689
1690 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1691 (cp.num_iac * 3) + 1, &cp);
1692}
1693
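/* Request callback for a discoverable setting change: updates the IAC,
 * scan mode and class of device for BR/EDR, and refreshes the advertising
 * data (and, in limited privacy mode, the advertising parameters) when
 * advertising was enabled through Set Advertising.
 */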
1694static int discoverable_update(struct hci_request *req, unsigned long opt)
1695{
1696 struct hci_dev *hdev = req->hdev;
1697
1698 hci_dev_lock(hdev);
1699
1700 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1701 write_iac(req);
1702 __hci_req_update_scan(req);
1703 __hci_req_update_class(req);
1704 }
1705
1706 /* Advertising instances don't use the global discoverable setting, so
1707 * only update AD if advertising was enabled using Set Advertising.
1708 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001709 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001710 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001711
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001712 /* Discoverable mode affects the local advertising
1713 * address in limited privacy mode.
1714 */
1715 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1716 __hci_req_enable_advertising(req);
1717 }
1718
Johan Hedbergaed1a882015-11-22 17:24:44 +03001719 hci_dev_unlock(hdev);
1720
1721 return 0;
1722}
1723
1724static void discoverable_update_work(struct work_struct *work)
1725{
1726 struct hci_dev *hdev = container_of(work, struct hci_dev,
1727 discoverable_update);
1728 u8 status;
1729
1730 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1731 mgmt_set_discoverable_complete(hdev, status);
1732}
1733
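/* Queue the HCI commands needed to abort the given connection based on
 * its current state: disconnect established links, cancel pending
 * connection attempts, or reject incoming connection requests.
 */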
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001734void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1735 u8 reason)
1736{
1737 switch (conn->state) {
1738 case BT_CONNECTED:
1739 case BT_CONFIG:
1740 if (conn->type == AMP_LINK) {
1741 struct hci_cp_disconn_phy_link cp;
1742
1743 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1744 cp.reason = reason;
1745 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1746 &cp);
1747 } else {
1748 struct hci_cp_disconnect dc;
1749
1750 dc.handle = cpu_to_le16(conn->handle);
1751 dc.reason = reason;
1752 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1753 }
1754
1755 conn->state = BT_DISCONN;
1756
1757 break;
1758 case BT_CONNECT:
1759 if (conn->type == LE_LINK) {
1760 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1761 break;
1762 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1763 0, NULL);
1764 } else if (conn->type == ACL_LINK) {
1765 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1766 break;
1767 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1768 6, &conn->dst);
1769 }
1770 break;
1771 case BT_CONNECT2:
1772 if (conn->type == ACL_LINK) {
1773 struct hci_cp_reject_conn_req rej;
1774
1775 bacpy(&rej.bdaddr, &conn->dst);
1776 rej.reason = reason;
1777
1778 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1779 sizeof(rej), &rej);
1780 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1781 struct hci_cp_reject_sync_conn_req rej;
1782
1783 bacpy(&rej.bdaddr, &conn->dst);
1784
1785 /* SCO rejection has its own limited set of
1786 * allowed error values (0x0D-0x0F) which isn't
1787 * compatible with most values passed to this
1788 * function. To be safe, hard-code one of the
1789 * values that's suitable for SCO.
1790 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02001791 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001792
1793 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1794 sizeof(rej), &rej);
1795 }
1796 break;
1797 default:
1798 conn->state = BT_CLOSED;
1799 break;
1800 }
1801}
1802
1803static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1804{
1805 if (status)
1806 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1807}
1808
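/* Build and run a standalone request that aborts the given connection.
 * An -ENODATA result (nothing was queued) is not treated as an error.
 */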
1809int hci_abort_conn(struct hci_conn *conn, u8 reason)
1810{
1811 struct hci_request req;
1812 int err;
1813
1814 hci_req_init(&req, conn->hdev);
1815
1816 __hci_abort_conn(&req, conn, reason);
1817
1818 err = hci_req_run(&req, abort_conn_complete);
1819 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001820 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001821 return err;
1822 }
1823
1824 return 0;
1825}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02001826
Johan Hedberga1d01db2015-11-11 08:11:25 +02001827static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02001828{
1829 hci_dev_lock(req->hdev);
1830 __hci_update_background_scan(req);
1831 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001832 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001833}
1834
1835static void bg_scan_update(struct work_struct *work)
1836{
1837 struct hci_dev *hdev = container_of(work, struct hci_dev,
1838 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02001839 struct hci_conn *conn;
1840 u8 status;
1841 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001842
Johan Hedberg84235d22015-11-11 08:11:20 +02001843 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1844 if (!err)
1845 return;
1846
1847 hci_dev_lock(hdev);
1848
1849 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1850 if (conn)
1851 hci_le_conn_failed(conn, status);
1852
1853 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02001854}
1855
Johan Hedberga1d01db2015-11-11 08:11:25 +02001856static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001857{
1858 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001859 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001860}
1861
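/* Flush the inquiry cache and start a BR/EDR inquiry of the requested
 * length, using the LIAC for limited discovery and the GIAC otherwise.
 */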
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001862static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1863{
1864 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02001865 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1866 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001867 struct hci_cp_inquiry cp;
1868
1869 BT_DBG("%s", req->hdev->name);
1870
1871 hci_dev_lock(req->hdev);
1872 hci_inquiry_cache_flush(req->hdev);
1873 hci_dev_unlock(req->hdev);
1874
1875 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02001876
1877 if (req->hdev->discovery.limited)
1878 memcpy(&cp.lap, liac, sizeof(cp.lap));
1879 else
1880 memcpy(&cp.lap, giac, sizeof(cp.lap));
1881
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001882 cp.length = length;
1883
1884 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1885
1886 return 0;
1887}
1888
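/* Work callback run when the LE scan timeout expires: disables LE
 * scanning and, depending on the discovery type and controller quirks,
 * either stops discovery or continues with a BR/EDR inquiry.
 */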
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001889static void le_scan_disable_work(struct work_struct *work)
1890{
1891 struct hci_dev *hdev = container_of(work, struct hci_dev,
1892 le_scan_disable.work);
1893 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001894
1895 BT_DBG("%s", hdev->name);
1896
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001897 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001898 return;
1899
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001900 cancel_delayed_work(&hdev->le_scan_restart);
1901
1902 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1903 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001904 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
1905 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001906 return;
1907 }
1908
1909 hdev->discovery.scan_start = 0;
1910
1911 /* If we were running an LE-only scan, change the discovery state.
1912 * If we were running both LE scan and BR/EDR inquiry simultaneously
1913 * and the BR/EDR inquiry has already finished, stop discovery;
1914 * otherwise the BR/EDR inquiry will stop discovery when it finishes.
1915 * If we are resolving a remote device name, do not change the
1916 * discovery state.
1917 */
1918
1919 if (hdev->discovery.type == DISCOV_TYPE_LE)
1920 goto discov_stopped;
1921
1922 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1923 return;
1924
1925 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1926 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1927 hdev->discovery.state != DISCOVERY_RESOLVING)
1928 goto discov_stopped;
1929
1930 return;
1931 }
1932
1933 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1934 HCI_CMD_TIMEOUT, &status);
1935 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001936 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001937 goto discov_stopped;
1938 }
1939
1940 return;
1941
1942discov_stopped:
1943 hci_dev_lock(hdev);
1944 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1945 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001946}
1947
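/* Request callback that restarts LE scanning by disabling and
 * re-enabling the scan; does nothing if the controller is not currently
 * scanning.
 */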
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001948static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001949{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001950 struct hci_dev *hdev = req->hdev;
1951 struct hci_cp_le_set_scan_enable cp;
1952
1953 /* If the controller is not scanning, we are done. */
1954 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1955 return 0;
1956
1957 hci_req_add_le_scan_disable(req);
1958
1959 memset(&cp, 0, sizeof(cp));
1960 cp.enable = LE_SCAN_ENABLE;
1961 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1962 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1963
1964 return 0;
1965}
1966
1967static void le_scan_restart_work(struct work_struct *work)
1968{
1969 struct hci_dev *hdev = container_of(work, struct hci_dev,
1970 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001971 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001972 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001973
1974 BT_DBG("%s", hdev->name);
1975
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001976 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001977 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001978 bt_dev_err(hdev, "failed to restart LE scan: status %d",
1979 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001980 return;
1981 }
1982
1983 hci_dev_lock(hdev);
1984
1985 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1986 !hdev->discovery.scan_start)
1987 goto unlock;
1988
1989 /* When the scan was started, the hdev->le_scan_disable work was
1990 * queued to run 'duration' after scan_start. That work was canceled
1991 * during the scan restart, so queue it again with the remaining
1992 * timeout to make sure that the scan does not run indefinitely.
1993 */
1994 duration = hdev->discovery.scan_duration;
1995 scan_start = hdev->discovery.scan_start;
1996 now = jiffies;
1997 if (now - scan_start <= duration) {
1998 int elapsed;
1999
2000 if (now >= scan_start)
2001 elapsed = now - scan_start;
2002 else
2003 elapsed = ULONG_MAX - scan_start + now;
2004
2005 timeout = duration - elapsed;
2006 } else {
2007 timeout = 0;
2008 }
2009
2010 queue_delayed_work(hdev->req_workqueue,
2011 &hdev->le_scan_disable, timeout);
2012
2013unlock:
2014 hci_dev_unlock(hdev);
2015}
2016
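/* Request callback that starts an active LE scan for discovery: disables
 * any ongoing advertising (refusing with -EBUSY if an outgoing connection
 * attempt is using directed advertising), temporarily stops a running
 * scan, selects a private own address and enables scanning with the
 * requested interval.
 */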
Johan Hedberge68f0722015-11-11 08:30:30 +02002017static int active_scan(struct hci_request *req, unsigned long opt)
2018{
2019 uint16_t interval = opt;
2020 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002021 u8 own_addr_type;
2022 int err;
2023
2024 BT_DBG("%s", hdev->name);
2025
2026 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2027 hci_dev_lock(hdev);
2028
2029 /* Don't let discovery abort an outgoing connection attempt
2030 * that's using directed advertising.
2031 */
2032 if (hci_lookup_le_connect(hdev)) {
2033 hci_dev_unlock(hdev);
2034 return -EBUSY;
2035 }
2036
2037 cancel_adv_timeout(hdev);
2038 hci_dev_unlock(hdev);
2039
Jaganath Kanakkassery94386b62017-12-11 20:26:47 +05302040 __hci_req_disable_advertising(req);
Johan Hedberge68f0722015-11-11 08:30:30 +02002041 }
2042
2043 /* If the controller is already scanning, background scanning is
2044 * running. Temporarily stop it in order to set the discovery
2045 * scanning parameters.
2046 */
2047 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2048 hci_req_add_le_scan_disable(req);
2049
2050 /* All active scans will be done with either a resolvable private
2051 * address (when the privacy feature has been enabled) or a
2052 * non-resolvable private address.
2053 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002054 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2055 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002056 if (err < 0)
2057 own_addr_type = ADDR_LE_DEV_PUBLIC;
2058
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +05302059 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2060 own_addr_type, 0);
Johan Hedberge68f0722015-11-11 08:30:30 +02002061 return 0;
2062}
2063
2064static int interleaved_discov(struct hci_request *req, unsigned long opt)
2065{
2066 int err;
2067
2068 BT_DBG("%s", req->hdev->name);
2069
2070 err = active_scan(req, opt);
2071 if (err)
2072 return err;
2073
Johan Hedberg7df26b52015-11-11 12:24:21 +02002074 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002075}
2076
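/* Start discovery according to hdev->discovery.type: BR/EDR inquiry only,
 * LE scan only, or an interleaved/simultaneous combination of both, and
 * schedule the LE scan disable work once an LE scan has been started.
 */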
2077static void start_discovery(struct hci_dev *hdev, u8 *status)
2078{
2079 unsigned long timeout;
2080
2081 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2082
2083 switch (hdev->discovery.type) {
2084 case DISCOV_TYPE_BREDR:
2085 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002086 hci_req_sync(hdev, bredr_inquiry,
2087 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002088 status);
2089 return;
2090 case DISCOV_TYPE_INTERLEAVED:
2091 /* When running simultaneous discovery, the LE scanning time
2092 * should occupy the whole discovery time since BR/EDR inquiry
2093 * and LE scanning are scheduled by the controller.
2094 *
2095 * For interleaved discovery, in comparison, BR/EDR inquiry
2096 * and LE scanning are done sequentially with separate
2097 * timeouts.
2098 */
2099 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2100 &hdev->quirks)) {
2101 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2102 /* During simultaneous discovery, we double LE scan
2103 * interval. We must leave some time for the controller
2104 * to do BR/EDR inquiry.
2105 */
2106 hci_req_sync(hdev, interleaved_discov,
2107 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2108 status);
2109 break;
2110 }
2111
2112 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2113 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2114 HCI_CMD_TIMEOUT, status);
2115 break;
2116 case DISCOV_TYPE_LE:
2117 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2118 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2119 HCI_CMD_TIMEOUT, status);
2120 break;
2121 default:
2122 *status = HCI_ERROR_UNSPECIFIED;
2123 return;
2124 }
2125
2126 if (*status)
2127 return;
2128
2129 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2130
2131 /* When service discovery is used and the controller has a
2132 * strict duplicate filter, it is important to remember the
2133 * start and duration of the scan. This is required for
2134 * restarting scanning during the discovery phase.
2135 */
2136 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2137 hdev->discovery.result_filtering) {
2138 hdev->discovery.scan_start = jiffies;
2139 hdev->discovery.scan_duration = timeout;
2140 }
2141
2142 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2143 timeout);
2144}
2145
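/* Queue the commands needed to stop the current discovery: cancel an
 * ongoing inquiry, disable LE scanning and, for BR/EDR, cancel a pending
 * remote name request. Returns true if any command was queued.
 */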
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002146bool hci_req_stop_discovery(struct hci_request *req)
2147{
2148 struct hci_dev *hdev = req->hdev;
2149 struct discovery_state *d = &hdev->discovery;
2150 struct hci_cp_remote_name_req_cancel cp;
2151 struct inquiry_entry *e;
2152 bool ret = false;
2153
2154 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2155
2156 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2157 if (test_bit(HCI_INQUIRY, &hdev->flags))
2158 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2159
2160 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2161 cancel_delayed_work(&hdev->le_scan_disable);
2162 hci_req_add_le_scan_disable(req);
2163 }
2164
2165 ret = true;
2166 } else {
2167 /* Passive scanning */
2168 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2169 hci_req_add_le_scan_disable(req);
2170 ret = true;
2171 }
2172 }
2173
2174 /* No further actions needed for LE-only discovery */
2175 if (d->type == DISCOV_TYPE_LE)
2176 return ret;
2177
2178 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2179 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2180 NAME_PENDING);
2181 if (!e)
2182 return ret;
2183
2184 bacpy(&cp.bdaddr, &e->data.bdaddr);
2185 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2186 &cp);
2187 ret = true;
2188 }
2189
2190 return ret;
2191}
2192
2193static int stop_discovery(struct hci_request *req, unsigned long opt)
2194{
2195 hci_dev_lock(req->hdev);
2196 hci_req_stop_discovery(req);
2197 hci_dev_unlock(req->hdev);
2198
2199 return 0;
2200}
2201
Johan Hedberge68f0722015-11-11 08:30:30 +02002202static void discov_update(struct work_struct *work)
2203{
2204 struct hci_dev *hdev = container_of(work, struct hci_dev,
2205 discov_update);
2206 u8 status = 0;
2207
2208 switch (hdev->discovery.state) {
2209 case DISCOVERY_STARTING:
2210 start_discovery(hdev, &status);
2211 mgmt_start_discovery_complete(hdev, status);
2212 if (status)
2213 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2214 else
2215 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2216 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002217 case DISCOVERY_STOPPING:
2218 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2219 mgmt_stop_discovery_complete(hdev, status);
2220 if (!status)
2221 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2222 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002223 case DISCOVERY_STOPPED:
2224 default:
2225 return;
2226 }
2227}
2228
Johan Hedbergc366f552015-11-23 15:43:06 +02002229static void discov_off(struct work_struct *work)
2230{
2231 struct hci_dev *hdev = container_of(work, struct hci_dev,
2232 discov_off.work);
2233
2234 BT_DBG("%s", hdev->name);
2235
2236 hci_dev_lock(hdev);
2237
2238 /* When the discoverable timeout triggers, just make sure the
2239 * limited discoverable flag is cleared. Even in the case of a
2240 * timeout triggered from general discoverable, it is safe to
2241 * unconditionally clear the flag.
2242 */
2243 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2244 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2245 hdev->discov_timeout = 0;
2246
2247 hci_dev_unlock(hdev);
2248
2249 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2250 mgmt_new_settings(hdev);
2251}
2252
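/* Request callback run while powering on the controller: synchronizes
 * SSP, Secure Connections, LE host support, advertising, link security,
 * scan mode, class of device, name and EIR with the current settings.
 */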
Johan Hedberg2ff13892015-11-25 16:15:44 +02002253static int powered_update_hci(struct hci_request *req, unsigned long opt)
2254{
2255 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002256 u8 link_sec;
2257
2258 hci_dev_lock(hdev);
2259
2260 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2261 !lmp_host_ssp_capable(hdev)) {
2262 u8 mode = 0x01;
2263
2264 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2265
2266 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2267 u8 support = 0x01;
2268
2269 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2270 sizeof(support), &support);
2271 }
2272 }
2273
2274 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2275 lmp_bredr_capable(hdev)) {
2276 struct hci_cp_write_le_host_supported cp;
2277
2278 cp.le = 0x01;
2279 cp.simul = 0x00;
2280
2281 /* Check first if we already have the right
2282 * host state (host features set)
2283 */
2284 if (cp.le != lmp_host_le_capable(hdev) ||
2285 cp.simul != lmp_host_le_br_capable(hdev))
2286 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2287 sizeof(cp), &cp);
2288 }
2289
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002290 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002291 /* Make sure the controller has a good default for
2292 * advertising data. This also applies to the case
2293 * where BR/EDR was toggled during the AUTO_OFF phase.
2294 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002295 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2296 list_empty(&hdev->adv_instances)) {
2297 __hci_req_update_adv_data(req, 0x00);
2298 __hci_req_update_scan_rsp_data(req, 0x00);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002299
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002300 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2301 __hci_req_enable_advertising(req);
2302 } else if (!list_empty(&hdev->adv_instances)) {
2303 struct adv_info *adv_instance;
2304
Johan Hedberg2ff13892015-11-25 16:15:44 +02002305 adv_instance = list_first_entry(&hdev->adv_instances,
2306 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002307 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002308 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002309 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002310 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002311 }
2312
2313 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2314 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2315 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2316 sizeof(link_sec), &link_sec);
2317
2318 if (lmp_bredr_capable(hdev)) {
2319 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2320 __hci_req_write_fast_connectable(req, true);
2321 else
2322 __hci_req_write_fast_connectable(req, false);
2323 __hci_req_update_scan(req);
2324 __hci_req_update_class(req);
2325 __hci_req_update_name(req);
2326 __hci_req_update_eir(req);
2327 }
2328
2329 hci_dev_unlock(hdev);
2330 return 0;
2331}
2332
2333int __hci_req_hci_power_on(struct hci_dev *hdev)
2334{
2335 /* Register the available SMP channels (BR/EDR and LE) only when
2336 * successfully powering on the controller. This late
2337 * registration is required so that LE SMP can clearly decide if
2338 * the public address or static address is used.
2339 */
2340 smp_register(hdev);
2341
2342 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2343 NULL);
2344}
2345
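/* Set up the work items used by the request helpers above; counterpart of
 * hci_request_cancel_all() below.
 */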
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002346void hci_request_setup(struct hci_dev *hdev)
2347{
Johan Hedberge68f0722015-11-11 08:30:30 +02002348 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002349 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002350 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002351 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002352 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002353 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002354 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2355 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002356 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002357}
2358
2359void hci_request_cancel_all(struct hci_dev *hdev)
2360{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002361 hci_req_sync_cancel(hdev, ENODEV);
2362
Johan Hedberge68f0722015-11-11 08:30:30 +02002363 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002364 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002365 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002366 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002367 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002368 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002369 cancel_delayed_work_sync(&hdev->le_scan_disable);
2370 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002371
2372 if (hdev->adv_instance_timeout) {
2373 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2374 hdev->adv_instance_timeout = 0;
2375 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002376}