/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

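/* Illustrative sketch (not part of the driver): a typical caller builds a
 * request on the stack, queues one or more HCI commands on it and then hands
 * the whole batch to hci_req_run() with an optional completion callback.
 * The opcode and helper names below are assumed examples of such a caller,
 * not code that exists elsewhere in the tree.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s example request done: status 0x%2.2x opcode 0x%4.4x",
	       hdev->name, status, opcode);
}

static int example_send_scan_enable(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* hci_req_run() returns -ENODATA if nothing was queued, which
	 * callers usually treat as "nothing to do" rather than an error.
	 */
	return hci_req_run(&req, example_req_complete);
}
#endif
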
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

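/* Illustrative sketch (not part of the driver): __hci_cmd_sync() sends a
 * single command and blocks until the matching completion arrives.  The
 * returned skb carries the command's return parameters and must be freed by
 * the caller.  The opcode and timeout used here are assumed examples only.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The response payload would be parsed here before freeing it. */
	kfree_skb(skb);

	return 0;
}
#endif
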
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

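/* Illustrative sketch (not part of the driver): hci_req_sync() takes a
 * request-builder callback that queues whatever commands are needed for the
 * given opt value, then waits for the whole request to complete.  The builder
 * and opcode below are assumed examples of that calling convention.
 */
#if 0
static int example_write_scan_enable(struct hci_request *req,
				     unsigned long opt)
{
	u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int example_set_scan(struct hci_dev *hdev, u8 scan)
{
	u8 hci_status;

	return hci_req_sync(hdev, example_write_scan_enable, scan,
			    HCI_CMD_TIMEOUT, &hci_status);
}
#endif
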
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

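/* Illustrative sketch (not part of the driver): hci_req_add_ev() lets a
 * queued command be completed by a specific HCI event instead of the usual
 * Command Complete/Status, which __hci_cmd_sync_ev() relies on to wait for
 * that event.  The opcode, payload and event chosen here are assumed
 * examples only.
 */
#if 0
static void example_request_remote_name(struct hci_request *req,
					 bdaddr_t *bdaddr)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, bdaddr);

	/* Treat the Remote Name Request Complete event as the terminating
	 * event for this command.
	 */
	hci_req_add_ev(req, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp,
		       HCI_EV_REMOTE_NAME);
}
#endif
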
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !params->wakeable)
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

Michał Narajowskif61851f2016-10-19 10:20:27 +02001344u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001345{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001346 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001347 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001348
Michał Narajowskif61851f2016-10-19 10:20:27 +02001349 /* no space left for name (+ NULL + type + len) */
1350 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1351 return ad_len;
1352
1353 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001354 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001355 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001356 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001357 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001358
Michał Narajowskif61851f2016-10-19 10:20:27 +02001359 /* use short name if present */
1360 short_len = strlen(hdev->short_name);
1361 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001362 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001363 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001364
Michał Narajowskif61851f2016-10-19 10:20:27 +02001365 /* use shortened full name if present, we already know that name
1366 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1367 */
1368 if (complete_len) {
1369 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1370
1371 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1372 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1373
1374 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1375 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001376 }
1377
1378 return ad_len;
1379}
1380
Michał Narajowski1b422062016-10-05 12:28:27 +02001381static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1382{
1383 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1384}
1385
Michał Narajowski7c295c42016-09-18 12:50:02 +02001386static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1387{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001388 u8 scan_rsp_len = 0;
1389
1390 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001391 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001392 }
1393
Michał Narajowski1b422062016-10-05 12:28:27 +02001394 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001395}
1396
Johan Hedbergf2252572015-11-18 12:49:20 +02001397static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1398 u8 *ptr)
1399{
1400 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001401 u32 instance_flags;
1402 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001403
1404 adv_instance = hci_find_adv_instance(hdev, instance);
1405 if (!adv_instance)
1406 return 0;
1407
Michał Narajowski7c295c42016-09-18 12:50:02 +02001408 instance_flags = adv_instance->flags;
1409
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001410 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001411 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001412 }
1413
Michał Narajowski1b422062016-10-05 12:28:27 +02001414 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001415 adv_instance->scan_rsp_len);
1416
Michał Narajowski7c295c42016-09-18 12:50:02 +02001417 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001418
1419 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1420 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1421
1422 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001423}
1424
Johan Hedbergcab054a2015-11-30 11:21:45 +02001425void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001426{
1427 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001428 u8 len;
1429
1430 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1431 return;
1432
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301433 if (ext_adv_capable(hdev)) {
1434 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001435
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301436 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001437
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301438 if (instance)
1439 len = create_instance_scan_rsp_data(hdev, instance,
1440 cp.data);
1441 else
1442 len = create_default_scan_rsp_data(hdev, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001443
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301444 if (hdev->scan_rsp_data_len == len &&
1445 !memcmp(cp.data, hdev->scan_rsp_data, len))
1446 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001447
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301448 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1449 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001450
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001451 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301452 cp.length = len;
1453 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1454 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1455
1456 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1457 &cp);
1458 } else {
1459 struct hci_cp_le_set_scan_rsp_data cp;
1460
1461 memset(&cp, 0, sizeof(cp));
1462
1463 if (instance)
1464 len = create_instance_scan_rsp_data(hdev, instance,
1465 cp.data);
1466 else
1467 len = create_default_scan_rsp_data(hdev, cp.data);
1468
1469 if (hdev->scan_rsp_data_len == len &&
1470 !memcmp(cp.data, hdev->scan_rsp_data, len))
1471 return;
1472
1473 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1474 hdev->scan_rsp_data_len = len;
1475
1476 cp.length = len;
1477
1478 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1479 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001480}
1481
Johan Hedbergf2252572015-11-18 12:49:20 +02001482static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1483{
1484 struct adv_info *adv_instance = NULL;
1485 u8 ad_len = 0, flags = 0;
1486 u32 instance_flags;
1487
1488 /* Return 0 when the current instance identifier is invalid. */
1489 if (instance) {
1490 adv_instance = hci_find_adv_instance(hdev, instance);
1491 if (!adv_instance)
1492 return 0;
1493 }
1494
1495 instance_flags = get_adv_instance_flags(hdev, instance);
1496
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001497 /* If instance already has the flags set skip adding it once
1498 * again.
1499 */
1500 if (adv_instance && eir_get_data(adv_instance->adv_data,
1501 adv_instance->adv_data_len, EIR_FLAGS,
1502 NULL))
1503 goto skip_flags;
1504
Johan Hedbergf2252572015-11-18 12:49:20 +02001505 /* The Add Advertising command allows userspace to set both the general
1506 * and limited discoverable flags.
1507 */
1508 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1509 flags |= LE_AD_GENERAL;
1510
1511 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1512 flags |= LE_AD_LIMITED;
1513
Johan Hedbergf18ba582016-04-06 13:09:05 +03001514 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1515 flags |= LE_AD_NO_BREDR;
1516
Johan Hedbergf2252572015-11-18 12:49:20 +02001517 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1518 /* If a discovery flag wasn't provided, simply use the global
1519 * settings.
1520 */
1521 if (!flags)
1522 flags |= mgmt_get_adv_discov_flags(hdev);
1523
Johan Hedbergf2252572015-11-18 12:49:20 +02001524 /* If flags would still be empty, then there is no need to
1525	 * include the "Flags" AD field.
1526 */
1527 if (flags) {
1528 ptr[0] = 0x02;
1529 ptr[1] = EIR_FLAGS;
1530 ptr[2] = flags;
1531
1532 ad_len += 3;
1533 ptr += 3;
1534 }
1535 }
1536
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001537skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001538 if (adv_instance) {
1539 memcpy(ptr, adv_instance->adv_data,
1540 adv_instance->adv_data_len);
1541 ad_len += adv_instance->adv_data_len;
1542 ptr += adv_instance->adv_data_len;
1543 }
1544
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301545 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1546 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001547
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301548 if (ext_adv_capable(hdev)) {
1549 if (adv_instance)
1550 adv_tx_power = adv_instance->tx_power;
1551 else
1552 adv_tx_power = hdev->adv_tx_power;
1553 } else {
1554 adv_tx_power = hdev->adv_tx_power;
1555 }
1556
1557	/* Include the Tx Power AD field only if we have a valid value for it */
1558 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1559 ptr[0] = 0x02;
1560 ptr[1] = EIR_TX_POWER;
1561 ptr[2] = (u8)adv_tx_power;
1562
1563 ad_len += 3;
1564 ptr += 3;
1565 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001566 }
1567
1568 return ad_len;
1569}
1570
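/* Update the advertising data for the given instance and queue the matching
 * HCI command, using the extended variant when the controller supports
 * extended advertising and the legacy one otherwise. No command is queued
 * when the data is unchanged from the copy cached in hdev->adv_data.
 */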
Johan Hedbergcab054a2015-11-30 11:21:45 +02001571void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001572{
1573 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001574 u8 len;
1575
1576 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1577 return;
1578
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301579 if (ext_adv_capable(hdev)) {
1580 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001581
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301582 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001583
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301584 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001585
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301586 /* There's nothing to do if the data hasn't changed */
1587 if (hdev->adv_data_len == len &&
1588 memcmp(cp.data, hdev->adv_data, len) == 0)
1589 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001590
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301591 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1592 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001593
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301594 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001595 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301596 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1597 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1598
1599 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1600 } else {
1601 struct hci_cp_le_set_adv_data cp;
1602
1603 memset(&cp, 0, sizeof(cp));
1604
1605 len = create_instance_adv_data(hdev, instance, cp.data);
1606
1607 /* There's nothing to do if the data hasn't changed */
1608 if (hdev->adv_data_len == len &&
1609 memcmp(cp.data, hdev->adv_data, len) == 0)
1610 return;
1611
1612 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1613 hdev->adv_data_len = len;
1614
1615 cp.length = len;
1616
1617 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1618 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001619}
1620
Johan Hedbergcab054a2015-11-30 11:21:45 +02001621int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001622{
1623 struct hci_request req;
1624
1625 hci_req_init(&req, hdev);
1626 __hci_req_update_adv_data(&req, instance);
1627
1628 return hci_req_run(&req, NULL);
1629}
1630
1631static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1632{
1633 BT_DBG("%s status %u", hdev->name, status);
1634}
1635
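/* Resume advertising when the HCI_ADVERTISING setting is on or at least one
 * advertising instance exists: reschedule the current instance if one is
 * set, otherwise fall back to the default instance 0x00.
 */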
1636void hci_req_reenable_advertising(struct hci_dev *hdev)
1637{
1638 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001639
1640 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001641 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001642 return;
1643
Johan Hedbergf2252572015-11-18 12:49:20 +02001644 hci_req_init(&req, hdev);
1645
Johan Hedbergcab054a2015-11-30 11:21:45 +02001646 if (hdev->cur_adv_instance) {
1647 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1648 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001649 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301650 if (ext_adv_capable(hdev)) {
1651 __hci_req_start_ext_adv(&req, 0x00);
1652 } else {
1653 __hci_req_update_adv_data(&req, 0x00);
1654 __hci_req_update_scan_rsp_data(&req, 0x00);
1655 __hci_req_enable_advertising(&req);
1656 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001657 }
1658
1659 hci_req_run(&req, adv_enable_complete);
1660}
1661
1662static void adv_timeout_expire(struct work_struct *work)
1663{
1664 struct hci_dev *hdev = container_of(work, struct hci_dev,
1665 adv_instance_expire.work);
1666
1667 struct hci_request req;
1668 u8 instance;
1669
1670 BT_DBG("%s", hdev->name);
1671
1672 hci_dev_lock(hdev);
1673
1674 hdev->adv_instance_timeout = 0;
1675
Johan Hedbergcab054a2015-11-30 11:21:45 +02001676 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001677 if (instance == 0x00)
1678 goto unlock;
1679
1680 hci_req_init(&req, hdev);
1681
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001682 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001683
1684 if (list_empty(&hdev->adv_instances))
1685 __hci_req_disable_advertising(&req);
1686
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001687 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001688
1689unlock:
1690 hci_dev_unlock(hdev);
1691}
1692
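/* Pick the own address type and, where needed, a random address for an
 * advertising instance: a resolvable private address when use_rpa is set,
 * a freshly generated non-resolvable private address when only privacy is
 * required, and the public address otherwise. rand_addr is left as
 * BDADDR_ANY when no random address has to be programmed.
 */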
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301693int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1694 bool use_rpa, struct adv_info *adv_instance,
1695 u8 *own_addr_type, bdaddr_t *rand_addr)
1696{
1697 int err;
1698
1699 bacpy(rand_addr, BDADDR_ANY);
1700
1701	/* If privacy is enabled, use a resolvable private address. If
1702	 * the current RPA has expired, generate a new one.
1703 */
1704 if (use_rpa) {
1705 int to;
1706
1707 *own_addr_type = ADDR_LE_DEV_RANDOM;
1708
1709 if (adv_instance) {
1710 if (!adv_instance->rpa_expired &&
1711 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1712 return 0;
1713
1714 adv_instance->rpa_expired = false;
1715 } else {
1716 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1717 !bacmp(&hdev->random_addr, &hdev->rpa))
1718 return 0;
1719 }
1720
1721 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1722 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001723 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301724 return err;
1725 }
1726
1727 bacpy(rand_addr, &hdev->rpa);
1728
1729 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1730 if (adv_instance)
1731 queue_delayed_work(hdev->workqueue,
1732 &adv_instance->rpa_expired_cb, to);
1733 else
1734 queue_delayed_work(hdev->workqueue,
1735 &hdev->rpa_expired, to);
1736
1737 return 0;
1738 }
1739
1740	/* In case privacy is required without a resolvable private address,
1741	 * use a non-resolvable private address. This is useful for
1742 * non-connectable advertising.
1743 */
1744 if (require_privacy) {
1745 bdaddr_t nrpa;
1746
1747 while (true) {
1748 /* The non-resolvable private address is generated
1749	 * from six random bytes with the two most significant
1750 * bits cleared.
1751 */
1752 get_random_bytes(&nrpa, 6);
1753 nrpa.b[5] &= 0x3f;
1754
1755 /* The non-resolvable private address shall not be
1756 * equal to the public address.
1757 */
1758 if (bacmp(&hdev->bdaddr, &nrpa))
1759 break;
1760 }
1761
1762 *own_addr_type = ADDR_LE_DEV_RANDOM;
1763 bacpy(rand_addr, &nrpa);
1764
1765 return 0;
1766 }
1767
1768 /* No privacy so use a public address. */
1769 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1770
1771 return 0;
1772}
1773
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301774void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1775{
1776 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1777}
1778
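/* Queue the LE Set Extended Advertising Parameters command for the given
 * instance (0x00 means the default set). The event properties are derived
 * from the connectable setting and from whether the instance carries scan
 * response data, and the fixed min/max interval of 0x000800 corresponds to
 * roughly 1.28 s assuming the usual 0.625 ms advertising interval units.
 * A Set Advertising Set Random Address command is queued afterwards when a
 * new random address has to be programmed.
 */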
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301779int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301780{
1781 struct hci_cp_le_set_ext_adv_params cp;
1782 struct hci_dev *hdev = req->hdev;
1783 bool connectable;
1784 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301785 bdaddr_t random_addr;
1786 u8 own_addr_type;
1787 int err;
1788 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301789 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301790	/* In the ext adv set param command the interval is 3 octets */
1791 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1792
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301793 if (instance > 0) {
1794 adv_instance = hci_find_adv_instance(hdev, instance);
1795 if (!adv_instance)
1796 return -EINVAL;
1797 } else {
1798 adv_instance = NULL;
1799 }
1800
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301801 flags = get_adv_instance_flags(hdev, instance);
1802
1803 /* If the "connectable" instance flag was not set, then choose between
1804 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1805 */
1806 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1807 mgmt_get_connectable(hdev);
1808
Colin Ian King75edd1f2018-11-09 13:27:36 +00001809 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301810 return -EPERM;
1811
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301812 /* Set require_privacy to true only when non-connectable
1813 * advertising is used. In that case it is fine to use a
1814 * non-resolvable private address.
1815 */
1816 err = hci_get_random_address(hdev, !connectable,
1817 adv_use_rpa(hdev, flags), adv_instance,
1818 &own_addr_type, &random_addr);
1819 if (err < 0)
1820 return err;
1821
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301822 memset(&cp, 0, sizeof(cp));
1823
1824 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1825 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1826
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301827 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1828
1829 if (connectable) {
1830 if (secondary_adv)
1831 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1832 else
1833 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1834 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1835 if (secondary_adv)
1836 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1837 else
1838 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1839 } else {
1840 if (secondary_adv)
1841 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1842 else
1843 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1844 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301845
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301846 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301847 cp.channel_map = hdev->le_adv_channel_map;
1848 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001849 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301850
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301851 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1852 cp.primary_phy = HCI_ADV_PHY_1M;
1853 cp.secondary_phy = HCI_ADV_PHY_2M;
1854 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1855 cp.primary_phy = HCI_ADV_PHY_CODED;
1856 cp.secondary_phy = HCI_ADV_PHY_CODED;
1857 } else {
1858 /* In all other cases use 1M */
1859 cp.primary_phy = HCI_ADV_PHY_1M;
1860 cp.secondary_phy = HCI_ADV_PHY_1M;
1861 }
1862
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301863 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1864
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301865 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1866 bacmp(&random_addr, BDADDR_ANY)) {
1867 struct hci_cp_le_set_adv_set_rand_addr cp;
1868
1869	/* Check if the random address needs to be updated */
1870 if (adv_instance) {
1871 if (!bacmp(&random_addr, &adv_instance->random_addr))
1872 return 0;
1873 } else {
1874 if (!bacmp(&random_addr, &hdev->random_addr))
1875 return 0;
1876 }
1877
1878 memset(&cp, 0, sizeof(cp));
1879
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001880 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301881 bacpy(&cp.bdaddr, &random_addr);
1882
1883 hci_req_add(req,
1884 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1885 sizeof(cp), &cp);
1886 }
1887
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301888 return 0;
1889}
1890
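/* Enable a single extended advertising set for the given instance. The
 * controller schedules the set itself, so a per-instance duration, when
 * one is configured, is converted to the command's 10 ms units before it
 * is sent.
 */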
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001891int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301892{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001893 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301894 struct hci_cp_le_set_ext_adv_enable *cp;
1895 struct hci_cp_ext_adv_set *adv_set;
1896 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001897 struct adv_info *adv_instance;
1898
1899 if (instance > 0) {
1900 adv_instance = hci_find_adv_instance(hdev, instance);
1901 if (!adv_instance)
1902 return -EINVAL;
1903 } else {
1904 adv_instance = NULL;
1905 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301906
1907 cp = (void *) data;
1908 adv_set = (void *) cp->data;
1909
1910 memset(cp, 0, sizeof(*cp));
1911
1912 cp->enable = 0x01;
1913 cp->num_of_sets = 0x01;
1914
1915 memset(adv_set, 0, sizeof(*adv_set));
1916
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001917 adv_set->handle = instance;
1918
1919	/* Set the duration per instance since the controller is responsible
1920	 * for scheduling it.
1921 */
1922 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001923 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001924
1925 /* Time = N * 10 ms */
1926 adv_set->duration = cpu_to_le16(duration / 10);
1927 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301928
1929 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1930 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1931 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001932
1933 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301934}
1935
1936int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1937{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301938 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301939 int err;
1940
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301941 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1942 __hci_req_disable_advertising(req);
1943
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301944 err = __hci_req_setup_ext_adv_instance(req, instance);
1945 if (err < 0)
1946 return err;
1947
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301948 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001949 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301950
1951 return 0;
1952}
1953
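/* Make the given instance the current one and queue the commands needed to
 * advertise it. The timeout is the instance duration, capped by the
 * remaining lifetime when the instance is time limited; with legacy
 * advertising the expiry is tracked via the adv_instance_expire delayed
 * work, while extended advertising leaves scheduling to the controller.
 * Re-scheduling the already-active instance is a no-op unless force is set.
 */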
Johan Hedbergf2252572015-11-18 12:49:20 +02001954int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1955 bool force)
1956{
1957 struct hci_dev *hdev = req->hdev;
1958 struct adv_info *adv_instance = NULL;
1959 u16 timeout;
1960
1961 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001962 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001963 return -EPERM;
1964
1965 if (hdev->adv_instance_timeout)
1966 return -EBUSY;
1967
1968 adv_instance = hci_find_adv_instance(hdev, instance);
1969 if (!adv_instance)
1970 return -ENOENT;
1971
1972 /* A zero timeout means unlimited advertising. As long as there is
1973 * only one instance, duration should be ignored. We still set a timeout
1974 * in case further instances are being added later on.
1975 *
1976 * If the remaining lifetime of the instance is more than the duration
1977 * then the timeout corresponds to the duration, otherwise it will be
1978 * reduced to the remaining instance lifetime.
1979 */
1980 if (adv_instance->timeout == 0 ||
1981 adv_instance->duration <= adv_instance->remaining_time)
1982 timeout = adv_instance->duration;
1983 else
1984 timeout = adv_instance->remaining_time;
1985
1986 /* The remaining time is being reduced unless the instance is being
1987 * advertised without time limit.
1988 */
1989 if (adv_instance->timeout)
1990 adv_instance->remaining_time =
1991 adv_instance->remaining_time - timeout;
1992
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001993 /* Only use work for scheduling instances with legacy advertising */
1994 if (!ext_adv_capable(hdev)) {
1995 hdev->adv_instance_timeout = timeout;
1996 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02001997 &hdev->adv_instance_expire,
1998 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001999 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002000
2001 /* If we're just re-scheduling the same instance again then do not
2002 * execute any HCI commands. This happens when a single instance is
2003 * being advertised.
2004 */
2005 if (!force && hdev->cur_adv_instance == instance &&
2006 hci_dev_test_flag(hdev, HCI_LE_ADV))
2007 return 0;
2008
2009 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302010 if (ext_adv_capable(hdev)) {
2011 __hci_req_start_ext_adv(req, instance);
2012 } else {
2013 __hci_req_update_adv_data(req, instance);
2014 __hci_req_update_scan_rsp_data(req, instance);
2015 __hci_req_enable_advertising(req);
2016 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002017
2018 return 0;
2019}
2020
2021static void cancel_adv_timeout(struct hci_dev *hdev)
2022{
2023 if (hdev->adv_instance_timeout) {
2024 hdev->adv_instance_timeout = 0;
2025 cancel_delayed_work(&hdev->adv_instance_expire);
2026 }
2027}
2028
2029/* For a single instance:
2030 * - force == true: The instance will be removed even when its remaining
2031 * lifetime is not zero.
2032 * - force == false: the instance will be deactivated but kept stored unless
2033 * the remaining lifetime is zero.
2034 *
2035 * For instance == 0x00:
2036 * - force == true: All instances will be removed regardless of their timeout
2037 * setting.
2038 * - force == false: Only instances that have a timeout will be removed.
2039 */
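/* As an illustration, adv_timeout_expire above calls this as
 * hci_req_clear_adv_instance(hdev, NULL, &req, instance, false), so an
 * expired instance is only removed once its remaining lifetime has been
 * used up, while instances without a timeout are left untouched.
 */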
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002040void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2041 struct hci_request *req, u8 instance,
2042 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002043{
2044 struct adv_info *adv_instance, *n, *next_instance = NULL;
2045 int err;
2046 u8 rem_inst;
2047
2048 /* Cancel any timeout concerning the removed instance(s). */
2049 if (!instance || hdev->cur_adv_instance == instance)
2050 cancel_adv_timeout(hdev);
2051
2052 /* Get the next instance to advertise BEFORE we remove
2053 * the current one. This can be the same instance again
2054 * if there is only one instance.
2055 */
2056 if (instance && hdev->cur_adv_instance == instance)
2057 next_instance = hci_get_next_instance(hdev, instance);
2058
2059 if (instance == 0x00) {
2060 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2061 list) {
2062 if (!(force || adv_instance->timeout))
2063 continue;
2064
2065 rem_inst = adv_instance->instance;
2066 err = hci_remove_adv_instance(hdev, rem_inst);
2067 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002068 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002069 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002070 } else {
2071 adv_instance = hci_find_adv_instance(hdev, instance);
2072
2073 if (force || (adv_instance && adv_instance->timeout &&
2074 !adv_instance->remaining_time)) {
2075 /* Don't advertise a removed instance. */
2076 if (next_instance &&
2077 next_instance->instance == instance)
2078 next_instance = NULL;
2079
2080 err = hci_remove_adv_instance(hdev, instance);
2081 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002082 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002083 }
2084 }
2085
Johan Hedbergf2252572015-11-18 12:49:20 +02002086 if (!req || !hdev_is_powered(hdev) ||
2087 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2088 return;
2089
2090 if (next_instance)
2091 __hci_req_schedule_adv_instance(req, next_instance->instance,
2092 false);
2093}
2094
Johan Hedberg0857dd32014-12-19 13:40:20 +02002095static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2096{
2097 struct hci_dev *hdev = req->hdev;
2098
2099	/* If we're advertising or initiating an LE connection, we can't
2100 * go ahead and change the random address at this time. This is
2101 * because the eventual initiator address used for the
2102 * subsequently created connection will be undefined (some
2103 * controllers use the new address and others the one we had
2104 * when the operation started).
2105 *
2106	 * In this kind of scenario, skip the update and let the random
2107 * address be updated at the next cycle.
2108 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002109 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002110 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002111 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002112 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002113 return;
2114 }
2115
2116 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2117}
2118
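/* Decide the own address type for the controller-wide LE address and queue
 * a Set Random Address command when needed: a resolvable private address
 * when use_rpa is set, a non-resolvable private address when only privacy
 * is required, the static address when it is forced, when there is no
 * public address, or when BR/EDR is disabled on a dual-mode controller
 * with a static address configured, and the public address otherwise.
 */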
2119int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002120 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002121{
2122 struct hci_dev *hdev = req->hdev;
2123 int err;
2124
2125	/* If privacy is enabled, use a resolvable private address. If the
2126	 * current RPA has expired or something other than the current RPA
2127	 * is in use, then generate a new one.
2128 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002129 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002130 int to;
2131
2132 *own_addr_type = ADDR_LE_DEV_RANDOM;
2133
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002134 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002135 !bacmp(&hdev->random_addr, &hdev->rpa))
2136 return 0;
2137
2138 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2139 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002140 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002141 return err;
2142 }
2143
2144 set_random_addr(req, &hdev->rpa);
2145
2146 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2147 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2148
2149 return 0;
2150 }
2151
2152	/* In case privacy is required without a resolvable private address,
2153	 * use a non-resolvable private address. This is useful for active
2154 * scanning and non-connectable advertising.
2155 */
2156 if (require_privacy) {
2157 bdaddr_t nrpa;
2158
2159 while (true) {
2160 /* The non-resolvable private address is generated
2161	 * from six random bytes with the two most significant
2162 * bits cleared.
2163 */
2164 get_random_bytes(&nrpa, 6);
2165 nrpa.b[5] &= 0x3f;
2166
2167 /* The non-resolvable private address shall not be
2168 * equal to the public address.
2169 */
2170 if (bacmp(&hdev->bdaddr, &nrpa))
2171 break;
2172 }
2173
2174 *own_addr_type = ADDR_LE_DEV_RANDOM;
2175 set_random_addr(req, &nrpa);
2176 return 0;
2177 }
2178
2179	/* If forcing the static address is in use or there is no public
2180	 * address, use the static address as the random address (but skip
2181	 * the HCI command if the current random address is already the
2182	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002183 *
2184 * In case BR/EDR has been disabled on a dual-mode controller
2185 * and a static address has been configured, then use that
2186 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002187 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002188 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002189 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002190 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002191 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002192 *own_addr_type = ADDR_LE_DEV_RANDOM;
2193 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2194 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2195 &hdev->static_addr);
2196 return 0;
2197 }
2198
2199 /* Neither privacy nor static address is being used so use a
2200 * public address.
2201 */
2202 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2203
2204 return 0;
2205}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002206
Johan Hedberg405a2612014-12-19 23:18:22 +02002207static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2208{
2209 struct bdaddr_list *b;
2210
2211 list_for_each_entry(b, &hdev->whitelist, list) {
2212 struct hci_conn *conn;
2213
2214 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2215 if (!conn)
2216 return true;
2217
2218 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2219 return true;
2220 }
2221
2222 return false;
2223}
2224
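/* Recompute the page and inquiry scan bits from the connectable and
 * discoverable settings plus any disconnected whitelist entries, and queue
 * a Write Scan Enable command only when the result differs from the bits
 * currently set in hdev->flags.
 */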
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002225void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002226{
2227 struct hci_dev *hdev = req->hdev;
2228 u8 scan;
2229
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002230 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002231 return;
2232
2233 if (!hdev_is_powered(hdev))
2234 return;
2235
2236 if (mgmt_powering_down(hdev))
2237 return;
2238
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002239 if (hdev->scanning_paused)
2240 return;
2241
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002242 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002243 disconnected_whitelist_entries(hdev))
2244 scan = SCAN_PAGE;
2245 else
2246 scan = SCAN_DISABLED;
2247
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002248 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002249 scan |= SCAN_INQUIRY;
2250
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002251 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2252 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2253 return;
2254
Johan Hedberg405a2612014-12-19 23:18:22 +02002255 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2256}
2257
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002258static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002259{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002260 hci_dev_lock(req->hdev);
2261 __hci_req_update_scan(req);
2262 hci_dev_unlock(req->hdev);
2263 return 0;
2264}
Johan Hedberg405a2612014-12-19 23:18:22 +02002265
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002266static void scan_update_work(struct work_struct *work)
2267{
2268 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2269
2270 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002271}
2272
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002273static int connectable_update(struct hci_request *req, unsigned long opt)
2274{
2275 struct hci_dev *hdev = req->hdev;
2276
2277 hci_dev_lock(hdev);
2278
2279 __hci_req_update_scan(req);
2280
2281 /* If BR/EDR is not enabled and we disable advertising as a
2282 * by-product of disabling connectable, we need to update the
2283 * advertising flags.
2284 */
2285 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002286 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002287
2288 /* Update the advertising parameters if necessary */
2289 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302290 !list_empty(&hdev->adv_instances)) {
2291 if (ext_adv_capable(hdev))
2292 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2293 else
2294 __hci_req_enable_advertising(req);
2295 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002296
2297 __hci_update_background_scan(req);
2298
2299 hci_dev_unlock(hdev);
2300
2301 return 0;
2302}
2303
2304static void connectable_update_work(struct work_struct *work)
2305{
2306 struct hci_dev *hdev = container_of(work, struct hci_dev,
2307 connectable_update);
2308 u8 status;
2309
2310 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2311 mgmt_set_connectable_complete(hdev, status);
2312}
2313
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002314static u8 get_service_classes(struct hci_dev *hdev)
2315{
2316 struct bt_uuid *uuid;
2317 u8 val = 0;
2318
2319 list_for_each_entry(uuid, &hdev->uuids, list)
2320 val |= uuid->svc_hint;
2321
2322 return val;
2323}
2324
2325void __hci_req_update_class(struct hci_request *req)
2326{
2327 struct hci_dev *hdev = req->hdev;
2328 u8 cod[3];
2329
2330 BT_DBG("%s", hdev->name);
2331
2332 if (!hdev_is_powered(hdev))
2333 return;
2334
2335 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2336 return;
2337
2338 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2339 return;
2340
2341 cod[0] = hdev->minor_class;
2342 cod[1] = hdev->major_class;
2343 cod[2] = get_service_classes(hdev);
2344
2345 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2346 cod[1] |= 0x20;
2347
2348 if (memcmp(cod, hdev->dev_class, 3) == 0)
2349 return;
2350
2351 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2352}
2353
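/* Program the current Inquiry Access Codes. The byte triples below are the
 * LIAC (0x9e8b00) and GIAC (0x9e8b33) LAPs written least significant byte
 * first; limited discoverable mode uses both (when the controller supports
 * two IACs), general discoverable mode only the GIAC.
 */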
Johan Hedbergaed1a882015-11-22 17:24:44 +03002354static void write_iac(struct hci_request *req)
2355{
2356 struct hci_dev *hdev = req->hdev;
2357 struct hci_cp_write_current_iac_lap cp;
2358
2359 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2360 return;
2361
2362 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2363 /* Limited discoverable mode */
2364 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2365 cp.iac_lap[0] = 0x00; /* LIAC */
2366 cp.iac_lap[1] = 0x8b;
2367 cp.iac_lap[2] = 0x9e;
2368 cp.iac_lap[3] = 0x33; /* GIAC */
2369 cp.iac_lap[4] = 0x8b;
2370 cp.iac_lap[5] = 0x9e;
2371 } else {
2372 /* General discoverable mode */
2373 cp.num_iac = 1;
2374 cp.iac_lap[0] = 0x33; /* GIAC */
2375 cp.iac_lap[1] = 0x8b;
2376 cp.iac_lap[2] = 0x9e;
2377 }
2378
2379 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2380 (cp.num_iac * 3) + 1, &cp);
2381}
2382
2383static int discoverable_update(struct hci_request *req, unsigned long opt)
2384{
2385 struct hci_dev *hdev = req->hdev;
2386
2387 hci_dev_lock(hdev);
2388
2389 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2390 write_iac(req);
2391 __hci_req_update_scan(req);
2392 __hci_req_update_class(req);
2393 }
2394
2395 /* Advertising instances don't use the global discoverable setting, so
2396 * only update AD if advertising was enabled using Set Advertising.
2397 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002398 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002399 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002400
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002401 /* Discoverable mode affects the local advertising
2402 * address in limited privacy mode.
2403 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302404 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2405 if (ext_adv_capable(hdev))
2406 __hci_req_start_ext_adv(req, 0x00);
2407 else
2408 __hci_req_enable_advertising(req);
2409 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002410 }
2411
Johan Hedbergaed1a882015-11-22 17:24:44 +03002412 hci_dev_unlock(hdev);
2413
2414 return 0;
2415}
2416
2417static void discoverable_update_work(struct work_struct *work)
2418{
2419 struct hci_dev *hdev = container_of(work, struct hci_dev,
2420 discoverable_update);
2421 u8 status;
2422
2423 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2424 mgmt_set_discoverable_complete(hdev, status);
2425}
2426
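/* Queue the command that tears down the connection in a way that matches
 * its current state: disconnect for established links (a physical link
 * disconnect for AMP), create-connection cancel for outgoing connects and
 * reject for incoming ones, falling back to simply marking the connection
 * closed in every other state.
 */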
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002427void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2428 u8 reason)
2429{
2430 switch (conn->state) {
2431 case BT_CONNECTED:
2432 case BT_CONFIG:
2433 if (conn->type == AMP_LINK) {
2434 struct hci_cp_disconn_phy_link cp;
2435
2436 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2437 cp.reason = reason;
2438 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2439 &cp);
2440 } else {
2441 struct hci_cp_disconnect dc;
2442
2443 dc.handle = cpu_to_le16(conn->handle);
2444 dc.reason = reason;
2445 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2446 }
2447
2448 conn->state = BT_DISCONN;
2449
2450 break;
2451 case BT_CONNECT:
2452 if (conn->type == LE_LINK) {
2453 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2454 break;
2455 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2456 0, NULL);
2457 } else if (conn->type == ACL_LINK) {
2458 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2459 break;
2460 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2461 6, &conn->dst);
2462 }
2463 break;
2464 case BT_CONNECT2:
2465 if (conn->type == ACL_LINK) {
2466 struct hci_cp_reject_conn_req rej;
2467
2468 bacpy(&rej.bdaddr, &conn->dst);
2469 rej.reason = reason;
2470
2471 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2472 sizeof(rej), &rej);
2473 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2474 struct hci_cp_reject_sync_conn_req rej;
2475
2476 bacpy(&rej.bdaddr, &conn->dst);
2477
2478 /* SCO rejection has its own limited set of
2479 * allowed error values (0x0D-0x0F) which isn't
2480 * compatible with most values passed to this
2481	 * function. To be safe, hard-code one of the
2482 * values that's suitable for SCO.
2483 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002484 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002485
2486 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2487 sizeof(rej), &rej);
2488 }
2489 break;
2490 default:
2491 conn->state = BT_CLOSED;
2492 break;
2493 }
2494}
2495
2496static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2497{
2498 if (status)
2499 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2500}
2501
2502int hci_abort_conn(struct hci_conn *conn, u8 reason)
2503{
2504 struct hci_request req;
2505 int err;
2506
2507 hci_req_init(&req, conn->hdev);
2508
2509 __hci_abort_conn(&req, conn, reason);
2510
2511 err = hci_req_run(&req, abort_conn_complete);
2512 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002513 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002514 return err;
2515 }
2516
2517 return 0;
2518}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002519
Johan Hedberga1d01db2015-11-11 08:11:25 +02002520static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002521{
2522 hci_dev_lock(req->hdev);
2523 __hci_update_background_scan(req);
2524 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002525 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002526}
2527
2528static void bg_scan_update(struct work_struct *work)
2529{
2530 struct hci_dev *hdev = container_of(work, struct hci_dev,
2531 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002532 struct hci_conn *conn;
2533 u8 status;
2534 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002535
Johan Hedberg84235d22015-11-11 08:11:20 +02002536 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2537 if (!err)
2538 return;
2539
2540 hci_dev_lock(hdev);
2541
2542 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2543 if (conn)
2544 hci_le_conn_failed(conn, status);
2545
2546 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002547}
2548
Johan Hedberga1d01db2015-11-11 08:11:25 +02002549static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002550{
2551 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002552 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002553}
2554
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002555static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2556{
2557 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002558 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2559 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002560 struct hci_cp_inquiry cp;
2561
2562 BT_DBG("%s", req->hdev->name);
2563
2564 hci_dev_lock(req->hdev);
2565 hci_inquiry_cache_flush(req->hdev);
2566 hci_dev_unlock(req->hdev);
2567
2568 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002569
2570 if (req->hdev->discovery.limited)
2571 memcpy(&cp.lap, liac, sizeof(cp.lap));
2572 else
2573 memcpy(&cp.lap, giac, sizeof(cp.lap));
2574
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002575 cp.length = length;
2576
2577 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2578
2579 return 0;
2580}
2581
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002582static void le_scan_disable_work(struct work_struct *work)
2583{
2584 struct hci_dev *hdev = container_of(work, struct hci_dev,
2585 le_scan_disable.work);
2586 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002587
2588 BT_DBG("%s", hdev->name);
2589
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002590 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002591 return;
2592
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002593 cancel_delayed_work(&hdev->le_scan_restart);
2594
2595 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2596 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002597 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2598 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002599 return;
2600 }
2601
2602 hdev->discovery.scan_start = 0;
2603
2604	/* If we were running an LE-only scan, change the discovery state.
2605	 * If we were running both LE and BR/EDR inquiry simultaneously,
2606	 * and the BR/EDR inquiry has already finished, stop discovery;
2607	 * otherwise the BR/EDR inquiry will stop discovery when it
2608	 * finishes. If we are going to resolve a remote device name, do
2609	 * not change the discovery state.
2610 */
2611
2612 if (hdev->discovery.type == DISCOV_TYPE_LE)
2613 goto discov_stopped;
2614
2615 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2616 return;
2617
2618 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2619 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2620 hdev->discovery.state != DISCOVERY_RESOLVING)
2621 goto discov_stopped;
2622
2623 return;
2624 }
2625
2626 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2627 HCI_CMD_TIMEOUT, &status);
2628 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002629 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002630 goto discov_stopped;
2631 }
2632
2633 return;
2634
2635discov_stopped:
2636 hci_dev_lock(hdev);
2637 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2638 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002639}
2640
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002641static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002642{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002643 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002644
2645	/* If the controller is not scanning, we are done. */
2646 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2647 return 0;
2648
2649 hci_req_add_le_scan_disable(req);
2650
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302651 if (use_ext_scan(hdev)) {
2652 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2653
2654 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2655 ext_enable_cp.enable = LE_SCAN_ENABLE;
2656 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2657
2658 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2659 sizeof(ext_enable_cp), &ext_enable_cp);
2660 } else {
2661 struct hci_cp_le_set_scan_enable cp;
2662
2663 memset(&cp, 0, sizeof(cp));
2664 cp.enable = LE_SCAN_ENABLE;
2665 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2666 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2667 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002668
2669 return 0;
2670}
2671
2672static void le_scan_restart_work(struct work_struct *work)
2673{
2674 struct hci_dev *hdev = container_of(work, struct hci_dev,
2675 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002676 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002677 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002678
2679 BT_DBG("%s", hdev->name);
2680
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002681 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002682 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002683 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2684 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002685 return;
2686 }
2687
2688 hci_dev_lock(hdev);
2689
2690 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2691 !hdev->discovery.scan_start)
2692 goto unlock;
2693
2694	/* When the scan was started, hdev->le_scan_disable was queued to
2695	 * run at scan_start + duration. During scan restart this work has
2696	 * been canceled, and we need to queue it again with the proper
2697	 * timeout to make sure that the scan does not run indefinitely.
2698 */
2699 duration = hdev->discovery.scan_duration;
2700 scan_start = hdev->discovery.scan_start;
2701 now = jiffies;
2702 if (now - scan_start <= duration) {
2703 int elapsed;
2704
2705 if (now >= scan_start)
2706 elapsed = now - scan_start;
2707 else
2708 elapsed = ULONG_MAX - scan_start + now;
2709
2710 timeout = duration - elapsed;
2711 } else {
2712 timeout = 0;
2713 }
2714
2715 queue_delayed_work(hdev->req_workqueue,
2716 &hdev->le_scan_disable, timeout);
2717
2718unlock:
2719 hci_dev_unlock(hdev);
2720}
2721
Johan Hedberge68f0722015-11-11 08:30:30 +02002722static int active_scan(struct hci_request *req, unsigned long opt)
2723{
2724 uint16_t interval = opt;
2725 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002726 u8 own_addr_type;
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002727 /* White list is not used for discovery */
2728 u8 filter_policy = 0x00;
Johan Hedberge68f0722015-11-11 08:30:30 +02002729 int err;
2730
2731 BT_DBG("%s", hdev->name);
2732
Johan Hedberge68f0722015-11-11 08:30:30 +02002733	/* If the controller is scanning, it means that background scanning is
2734 * running. Thus, we should temporarily stop it in order to set the
2735 * discovery scanning parameters.
2736 */
2737 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2738 hci_req_add_le_scan_disable(req);
2739
2740 /* All active scans will be done with either a resolvable private
2741	 * address (when the privacy feature has been enabled) or a non-resolvable
2742 * private address.
2743 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002744 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2745 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002746 if (err < 0)
2747 own_addr_type = ADDR_LE_DEV_PUBLIC;
2748
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +05302749 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002750 own_addr_type, filter_policy);
Johan Hedberge68f0722015-11-11 08:30:30 +02002751 return 0;
2752}
2753
2754static int interleaved_discov(struct hci_request *req, unsigned long opt)
2755{
2756 int err;
2757
2758 BT_DBG("%s", req->hdev->name);
2759
2760 err = active_scan(req, opt);
2761 if (err)
2762 return err;
2763
Johan Hedberg7df26b52015-11-11 12:24:21 +02002764 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002765}
2766
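/* Run the synchronous request matching hdev->discovery.type: BR/EDR inquiry
 * only, LE scan only, or the interleaved/simultaneous combination. For the
 * LE and interleaved types the delayed LE scan disable work is then queued
 * with the chosen timeout.
 */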
2767static void start_discovery(struct hci_dev *hdev, u8 *status)
2768{
2769 unsigned long timeout;
2770
2771 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2772
2773 switch (hdev->discovery.type) {
2774 case DISCOV_TYPE_BREDR:
2775 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002776 hci_req_sync(hdev, bredr_inquiry,
2777 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002778 status);
2779 return;
2780 case DISCOV_TYPE_INTERLEAVED:
2781 /* When running simultaneous discovery, the LE scanning time
2782	 * should occupy the whole discovery time since BR/EDR inquiry
2783 * and LE scanning are scheduled by the controller.
2784 *
2785 * For interleaving discovery in comparison, BR/EDR inquiry
2786 * and LE scanning are done sequentially with separate
2787 * timeouts.
2788 */
2789 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2790 &hdev->quirks)) {
2791 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2792	/* During simultaneous discovery, we double the LE scan
2793 * interval. We must leave some time for the controller
2794 * to do BR/EDR inquiry.
2795 */
2796 hci_req_sync(hdev, interleaved_discov,
2797 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2798 status);
2799 break;
2800 }
2801
2802 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2803 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2804 HCI_CMD_TIMEOUT, status);
2805 break;
2806 case DISCOV_TYPE_LE:
2807 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2808 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2809 HCI_CMD_TIMEOUT, status);
2810 break;
2811 default:
2812 *status = HCI_ERROR_UNSPECIFIED;
2813 return;
2814 }
2815
2816 if (*status)
2817 return;
2818
2819 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2820
2821 /* When service discovery is used and the controller has a
2822 * strict duplicate filter, it is important to remember the
2823 * start and duration of the scan. This is required for
2824 * restarting scanning during the discovery phase.
2825 */
2826 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2827 hdev->discovery.result_filtering) {
2828 hdev->discovery.scan_start = jiffies;
2829 hdev->discovery.scan_duration = timeout;
2830 }
2831
2832 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2833 timeout);
2834}
2835
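/* Queue whatever commands are needed to stop the discovery that is
 * currently running: inquiry cancel and/or LE scan disable while in the
 * finding/stopping state, plus a remote name request cancel when a name
 * resolution is pending. Returns true when at least one command was added
 * to the request.
 */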
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002836bool hci_req_stop_discovery(struct hci_request *req)
2837{
2838 struct hci_dev *hdev = req->hdev;
2839 struct discovery_state *d = &hdev->discovery;
2840 struct hci_cp_remote_name_req_cancel cp;
2841 struct inquiry_entry *e;
2842 bool ret = false;
2843
2844 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2845
2846 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2847 if (test_bit(HCI_INQUIRY, &hdev->flags))
2848 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2849
2850 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2851 cancel_delayed_work(&hdev->le_scan_disable);
2852 hci_req_add_le_scan_disable(req);
2853 }
2854
2855 ret = true;
2856 } else {
2857 /* Passive scanning */
2858 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2859 hci_req_add_le_scan_disable(req);
2860 ret = true;
2861 }
2862 }
2863
2864 /* No further actions needed for LE-only discovery */
2865 if (d->type == DISCOV_TYPE_LE)
2866 return ret;
2867
2868 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2869 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2870 NAME_PENDING);
2871 if (!e)
2872 return ret;
2873
2874 bacpy(&cp.bdaddr, &e->data.bdaddr);
2875 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2876 &cp);
2877 ret = true;
2878 }
2879
2880 return ret;
2881}
2882
2883static int stop_discovery(struct hci_request *req, unsigned long opt)
2884{
2885 hci_dev_lock(req->hdev);
2886 hci_req_stop_discovery(req);
2887 hci_dev_unlock(req->hdev);
2888
2889 return 0;
2890}
2891
Johan Hedberge68f0722015-11-11 08:30:30 +02002892static void discov_update(struct work_struct *work)
2893{
2894 struct hci_dev *hdev = container_of(work, struct hci_dev,
2895 discov_update);
2896 u8 status = 0;
2897
2898 switch (hdev->discovery.state) {
2899 case DISCOVERY_STARTING:
2900 start_discovery(hdev, &status);
2901 mgmt_start_discovery_complete(hdev, status);
2902 if (status)
2903 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2904 else
2905 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2906 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002907 case DISCOVERY_STOPPING:
2908 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2909 mgmt_stop_discovery_complete(hdev, status);
2910 if (!status)
2911 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2912 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002913 case DISCOVERY_STOPPED:
2914 default:
2915 return;
2916 }
2917}
2918
Johan Hedbergc366f552015-11-23 15:43:06 +02002919static void discov_off(struct work_struct *work)
2920{
2921 struct hci_dev *hdev = container_of(work, struct hci_dev,
2922 discov_off.work);
2923
2924 BT_DBG("%s", hdev->name);
2925
2926 hci_dev_lock(hdev);
2927
2928	/* When the discoverable timeout triggers, just make sure
2929 * the limited discoverable flag is cleared. Even in the case
2930 * of a timeout triggered from general discoverable, it is
2931 * safe to unconditionally clear the flag.
2932 */
2933 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2934 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2935 hdev->discov_timeout = 0;
2936
2937 hci_dev_unlock(hdev);
2938
2939 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2940 mgmt_new_settings(hdev);
2941}
2942
Johan Hedberg2ff13892015-11-25 16:15:44 +02002943static int powered_update_hci(struct hci_request *req, unsigned long opt)
2944{
2945 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002946 u8 link_sec;
2947
2948 hci_dev_lock(hdev);
2949
2950 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2951 !lmp_host_ssp_capable(hdev)) {
2952 u8 mode = 0x01;
2953
2954 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2955
2956 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2957 u8 support = 0x01;
2958
2959 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2960 sizeof(support), &support);
2961 }
2962 }
2963
2964 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2965 lmp_bredr_capable(hdev)) {
2966 struct hci_cp_write_le_host_supported cp;
2967
2968 cp.le = 0x01;
2969 cp.simul = 0x00;
2970
2971 /* Check first if we already have the right
2972 * host state (host features set)
2973 */
2974 if (cp.le != lmp_host_le_capable(hdev) ||
2975 cp.simul != lmp_host_le_br_capable(hdev))
2976 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2977 sizeof(cp), &cp);
2978 }
2979
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002980 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002981 /* Make sure the controller has a good default for
2982 * advertising data. This also applies to the case
2983 * where BR/EDR was toggled during the AUTO_OFF phase.
2984 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002985 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2986 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302987 int err;
2988
2989 if (ext_adv_capable(hdev)) {
2990 err = __hci_req_setup_ext_adv_instance(req,
2991 0x00);
2992 if (!err)
2993 __hci_req_update_scan_rsp_data(req,
2994 0x00);
2995 } else {
2996 err = 0;
2997 __hci_req_update_adv_data(req, 0x00);
2998 __hci_req_update_scan_rsp_data(req, 0x00);
2999 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003000
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303001 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303002 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303003 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303004 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03003005 __hci_req_enable_ext_advertising(req,
3006 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303007 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003008 } else if (!list_empty(&hdev->adv_instances)) {
3009 struct adv_info *adv_instance;
3010
Johan Hedberg2ff13892015-11-25 16:15:44 +02003011 adv_instance = list_first_entry(&hdev->adv_instances,
3012 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02003013 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003014 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02003015 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003016 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003017 }
3018
3019 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3020 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3021 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3022 sizeof(link_sec), &link_sec);
3023
3024 if (lmp_bredr_capable(hdev)) {
3025 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3026 __hci_req_write_fast_connectable(req, true);
3027 else
3028 __hci_req_write_fast_connectable(req, false);
3029 __hci_req_update_scan(req);
3030 __hci_req_update_class(req);
3031 __hci_req_update_name(req);
3032 __hci_req_update_eir(req);
3033 }
3034
3035 hci_dev_unlock(hdev);
3036 return 0;
3037}
3038
3039int __hci_req_hci_power_on(struct hci_dev *hdev)
3040{
3041 /* Register the available SMP channels (BR/EDR and LE) only when
3042 * successfully powering on the controller. This late
3043 * registration is required so that LE SMP can clearly decide if
3044 * the public address or static address is used.
3045 */
3046 smp_register(hdev);
3047
3048 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3049 NULL);
3050}
3051
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003052void hci_request_setup(struct hci_dev *hdev)
3053{
Johan Hedberge68f0722015-11-11 08:30:30 +02003054 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003055 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003056 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003057 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003058 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02003059 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003060 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3061 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02003062 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003063}
3064
3065void hci_request_cancel_all(struct hci_dev *hdev)
3066{
Johan Hedberg7df0f732015-11-12 15:15:00 +02003067 hci_req_sync_cancel(hdev, ENODEV);
3068
Johan Hedberge68f0722015-11-11 08:30:30 +02003069 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003070 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003071 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003072 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003073 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02003074 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003075 cancel_delayed_work_sync(&hdev->le_scan_disable);
3076 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02003077
3078 if (hdev->adv_instance_timeout) {
3079 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3080 hdev->adv_instance_timeout = 0;
3081 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003082}