/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

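/* Hand the accumulated command queue over to the controller: the request's
 * commands are spliced onto hdev->cmd_q under the queue lock, the completion
 * callback (plain or skb-based) is attached to the last command, and the
 * command work is scheduled to start transmission.
 */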
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

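/* Completion callback used by the synchronous request helpers: it records the
 * result and, when one is provided, a reference to the response skb, then
 * wakes up the waiter sleeping on hdev->req_wait_q.
 */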
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

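/* Send a single HCI command and block until the matching event arrives or the
 * timeout expires. Returns the event skb on success or an ERR_PTR() value
 * (for example -EINTR, -ETIMEDOUT or the translated controller status) on
 * failure.
 */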
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

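/* Queue the page scan activity/type commands that toggle "fast connectable"
 * mode: interlaced page scan with a 160 msec interval when enabled, the
 * controller defaults otherwise. Commands are only added when the requested
 * values differ from what is currently programmed.
 */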
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Returns true if interleave scanning was started by this call,
 * false otherwise.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* If there is at least one ADV monitor and at least one pending LE
	 * connection or device to be scanned for, we should alternate between
	 * allowlist scan and one without any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports));
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

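/* The three helpers below append EIR UUID lists (16-, 32- and 128-bit) to the
 * extended inquiry response buffer. Each writes a length/type header, copies
 * as many UUIDs as fit in the remaining space and downgrades the type from
 * "complete" to "partial" (EIR_UUID*_SOME) when it runs out of room.
 */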
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

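/* Build the extended inquiry response payload: local name (shortened to 48
 * bytes if needed), inquiry TX power, Device ID record and the UUID lists,
 * in that order, into the caller-provided buffer.
 */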
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

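/* Rebuild the controller white list from the pending connection and report
 * lists. Returns the scan filter policy to use: 0x01 when the white list can
 * be used, 0x00 when scanning has to fall back to accepting all advertisers
 * (for example when an RPA-only device is involved or the list is too small).
 */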
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered
	 * - Interleaved scanning is not currently using the allowlist
	 *
	 * Once controller offloading of advertisement monitors is in place,
	 * the above condition should also check for MSFT extension support.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

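/* Queue the scan parameter and scan enable commands for the given scan type,
 * timing and filter policy. Extended scanning commands are used when the
 * controller supports them (programming the 1M and Coded PHYs as available);
 * otherwise the legacy commands are used. Address resolution is enabled first
 * when requested via addr_resolv and LL privacy is in use.
 */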
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the controller supports the extended
	 * scan parameters and scan enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Make sure hci_req_add_le_scan_disable() is called first to disable
 * controller-based address resolution before the resolving list is
 * reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (__hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return false;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

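/* Program a connection-setup event filter for every whitelist device that is
 * flagged as able to wake the host, auto-accepting connections from those
 * addresses, and enable page scan only if at least one such filter was added.
 */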
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

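/* Check the controller's supported LE states to decide whether advertising of
 * the requested (non-)connectable type may be enabled while connections in
 * the slave and/or master role exist.
 */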
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001448static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1449{
1450 /* If there is no connection we are OK to advertise. */
1451 if (hci_conn_num(hdev, LE_LINK) == 0)
1452 return true;
1453
1454 /* Check le_states if there is any connection in slave role. */
1455 if (hdev->conn_hash.le_num_slave > 0) {
1456 /* Slave connection state and non connectable mode bit 20. */
1457 if (!connectable && !(hdev->le_states[2] & 0x10))
1458 return false;
1459
1460 /* Slave connection state and connectable mode bit 38
1461 * and scannable bit 21.
1462 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001463 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1464 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001465 return false;
1466 }
1467
1468 /* Check le_states if there is any connection in master role. */
1469 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1470 /* Master connection state and non connectable mode bit 18. */
 1471		/* Master connection state and non-connectable mode bit 18. */
1472 return false;
1473
1474 /* Master connection state and connectable mode bit 35 and
 1475		 * scannable bit 19.
1476 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001477 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001478 !(hdev->le_states[2] & 0x08)))
1479 return false;
1480 }
1481
1482 return true;
1483}
1484
Johan Hedbergf2252572015-11-18 12:49:20 +02001485void __hci_req_enable_advertising(struct hci_request *req)
1486{
1487 struct hci_dev *hdev = req->hdev;
1488 struct hci_cp_le_set_adv_param cp;
1489 u8 own_addr_type, enable = 0x01;
1490 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301491 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001492 u32 flags;
1493
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001494 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1495
1496 /* If the "connectable" instance flag was not set, then choose between
1497 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1498 */
1499 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1500 mgmt_get_connectable(hdev);
1501
1502 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001503 return;
1504
1505 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1506 __hci_req_disable_advertising(req);
1507
1508 /* Clear the HCI_LE_ADV bit temporarily so that the
1509 * hci_update_random_address knows that it's safe to go ahead
1510 * and write a new random address. The flag will be set back on
1511 * as soon as the SET_ADV_ENABLE HCI command completes.
1512 */
1513 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1514
Johan Hedbergf2252572015-11-18 12:49:20 +02001515 /* Set require_privacy to true only when non-connectable
1516 * advertising is used. In that case it is fine to use a
1517 * non-resolvable private address.
1518 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001519 if (hci_update_random_address(req, !connectable,
1520 adv_use_rpa(hdev, flags),
1521 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001522 return;
1523
1524 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001525
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301526 if (connectable) {
Johan Hedbergf2252572015-11-18 12:49:20 +02001527 cp.type = LE_ADV_IND;
Johan Hedbergf2252572015-11-18 12:49:20 +02001528
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301529 adv_min_interval = hdev->le_adv_min_interval;
1530 adv_max_interval = hdev->le_adv_max_interval;
1531 } else {
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001532 if (adv_cur_instance_is_scannable(hdev))
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301533 cp.type = LE_ADV_SCAN_IND;
1534 else
1535 cp.type = LE_ADV_NONCONN_IND;
1536
1537 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1538 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1539 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1540 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1541 } else {
1542 adv_min_interval = hdev->le_adv_min_interval;
1543 adv_max_interval = hdev->le_adv_max_interval;
1544 }
1545 }
1546
1547 cp.min_interval = cpu_to_le16(adv_min_interval);
1548 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001549 cp.own_address_type = own_addr_type;
1550 cp.channel_map = hdev->le_adv_channel_map;
1551
1552 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1553
1554 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1555}
1556
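/* Append the device name to an EIR/AD buffer: the complete name if it
 * fits within the short-name budget, otherwise the short name, otherwise
 * the complete name truncated to HCI_MAX_SHORT_NAME_LENGTH. Returns the
 * updated data length.
 */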
Michał Narajowskif61851f2016-10-19 10:20:27 +02001557u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001558{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001559 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001560 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001561
Michał Narajowskif61851f2016-10-19 10:20:27 +02001562 /* no space left for name (+ NULL + type + len) */
1563 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1564 return ad_len;
1565
1566 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001567 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001568 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001569 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001570 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001571
Michał Narajowskif61851f2016-10-19 10:20:27 +02001572 /* use short name if present */
1573 short_len = strlen(hdev->short_name);
1574 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001575 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001576 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001577
Michał Narajowskif61851f2016-10-19 10:20:27 +02001578	/* use shortened full name if present; we already know that the name
 1579	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1580 */
1581 if (complete_len) {
1582 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1583
1584 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1585 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1586
1587 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1588 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001589 }
1590
1591 return ad_len;
1592}
1593
Michał Narajowski1b422062016-10-05 12:28:27 +02001594static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1595{
1596 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1597}
1598
Michał Narajowski7c295c42016-09-18 12:50:02 +02001599static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1600{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001601 u8 scan_rsp_len = 0;
1602
1603 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001604 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001605 }
1606
Michał Narajowski1b422062016-10-05 12:28:27 +02001607 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001608}
1609
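/* Build the scan response data for a specific advertising instance:
 * appearance (when requested by the instance flags), the instance's own
 * scan response data and, optionally, the local name.
 */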
Johan Hedbergf2252572015-11-18 12:49:20 +02001610static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1611 u8 *ptr)
1612{
1613 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001614 u32 instance_flags;
1615 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001616
1617 adv_instance = hci_find_adv_instance(hdev, instance);
1618 if (!adv_instance)
1619 return 0;
1620
Michał Narajowski7c295c42016-09-18 12:50:02 +02001621 instance_flags = adv_instance->flags;
1622
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001623 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001624 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001625 }
1626
Michał Narajowski1b422062016-10-05 12:28:27 +02001627 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001628 adv_instance->scan_rsp_len);
1629
Michał Narajowski7c295c42016-09-18 12:50:02 +02001630 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001631
1632 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1633 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1634
1635 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001636}
1637
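/* Queue the HCI command that programs the controller's scan response
 * data, using the extended variant when the controller supports extended
 * advertising. The command is skipped if the data has not changed.
 */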
Johan Hedbergcab054a2015-11-30 11:21:45 +02001638void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001639{
1640 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001641 u8 len;
1642
1643 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1644 return;
1645
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301646 if (ext_adv_capable(hdev)) {
1647 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001648
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301649 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001650
Luiz Augusto von Dentza76a0d32020-11-13 16:44:33 -08001651 if (instance)
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301652 len = create_instance_scan_rsp_data(hdev, instance,
1653 cp.data);
1654 else
Luiz Augusto von Dentza76a0d32020-11-13 16:44:33 -08001655 len = create_default_scan_rsp_data(hdev, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001656
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301657 if (hdev->scan_rsp_data_len == len &&
1658 !memcmp(cp.data, hdev->scan_rsp_data, len))
1659 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001660
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301661 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1662 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001663
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001664 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301665 cp.length = len;
1666 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1667 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1668
1669 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1670 &cp);
1671 } else {
1672 struct hci_cp_le_set_scan_rsp_data cp;
1673
1674 memset(&cp, 0, sizeof(cp));
1675
1676 if (instance)
1677 len = create_instance_scan_rsp_data(hdev, instance,
1678 cp.data);
1679 else
1680 len = create_default_scan_rsp_data(hdev, cp.data);
1681
1682 if (hdev->scan_rsp_data_len == len &&
1683 !memcmp(cp.data, hdev->scan_rsp_data, len))
1684 return;
1685
1686 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1687 hdev->scan_rsp_data_len = len;
1688
1689 cp.length = len;
1690
1691 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1692 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001693}
1694
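/* Build the advertising data for an instance: the Flags field (unless
 * the instance data already carries one), the instance's advertising
 * data and, when requested and valid, the Tx Power field.
 */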
Johan Hedbergf2252572015-11-18 12:49:20 +02001695static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1696{
1697 struct adv_info *adv_instance = NULL;
1698 u8 ad_len = 0, flags = 0;
1699 u32 instance_flags;
1700
1701 /* Return 0 when the current instance identifier is invalid. */
1702 if (instance) {
1703 adv_instance = hci_find_adv_instance(hdev, instance);
1704 if (!adv_instance)
1705 return 0;
1706 }
1707
1708 instance_flags = get_adv_instance_flags(hdev, instance);
1709
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001710	/* If the instance already has the flags set, skip adding them
 1711	 * again.
1712 */
1713 if (adv_instance && eir_get_data(adv_instance->adv_data,
1714 adv_instance->adv_data_len, EIR_FLAGS,
1715 NULL))
1716 goto skip_flags;
1717
Johan Hedbergf2252572015-11-18 12:49:20 +02001718 /* The Add Advertising command allows userspace to set both the general
1719 * and limited discoverable flags.
1720 */
1721 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1722 flags |= LE_AD_GENERAL;
1723
1724 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1725 flags |= LE_AD_LIMITED;
1726
Johan Hedbergf18ba582016-04-06 13:09:05 +03001727 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1728 flags |= LE_AD_NO_BREDR;
1729
Johan Hedbergf2252572015-11-18 12:49:20 +02001730 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1731 /* If a discovery flag wasn't provided, simply use the global
1732 * settings.
1733 */
1734 if (!flags)
1735 flags |= mgmt_get_adv_discov_flags(hdev);
1736
Johan Hedbergf2252572015-11-18 12:49:20 +02001737 /* If flags would still be empty, then there is no need to
1738 * include the "Flags" AD field".
 1739		 * include the "Flags" AD field.
1740 if (flags) {
1741 ptr[0] = 0x02;
1742 ptr[1] = EIR_FLAGS;
1743 ptr[2] = flags;
1744
1745 ad_len += 3;
1746 ptr += 3;
1747 }
1748 }
1749
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001750skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001751 if (adv_instance) {
1752 memcpy(ptr, adv_instance->adv_data,
1753 adv_instance->adv_data_len);
1754 ad_len += adv_instance->adv_data_len;
1755 ptr += adv_instance->adv_data_len;
1756 }
1757
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301758 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1759 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001760
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301761 if (ext_adv_capable(hdev)) {
1762 if (adv_instance)
1763 adv_tx_power = adv_instance->tx_power;
1764 else
1765 adv_tx_power = hdev->adv_tx_power;
1766 } else {
1767 adv_tx_power = hdev->adv_tx_power;
1768 }
1769
1770 /* Provide Tx Power only if we can provide a valid value for it */
1771 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1772 ptr[0] = 0x02;
1773 ptr[1] = EIR_TX_POWER;
1774 ptr[2] = (u8)adv_tx_power;
1775
1776 ad_len += 3;
1777 ptr += 3;
1778 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001779 }
1780
1781 return ad_len;
1782}
1783
Johan Hedbergcab054a2015-11-30 11:21:45 +02001784void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001785{
1786 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001787 u8 len;
1788
1789 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1790 return;
1791
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301792 if (ext_adv_capable(hdev)) {
1793 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001794
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301795 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001796
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301797 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001798
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301799 /* There's nothing to do if the data hasn't changed */
1800 if (hdev->adv_data_len == len &&
1801 memcmp(cp.data, hdev->adv_data, len) == 0)
1802 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001803
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301804 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1805 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001806
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301807 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001808 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301809 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1810 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1811
1812 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1813 } else {
1814 struct hci_cp_le_set_adv_data cp;
1815
1816 memset(&cp, 0, sizeof(cp));
1817
1818 len = create_instance_adv_data(hdev, instance, cp.data);
1819
1820 /* There's nothing to do if the data hasn't changed */
1821 if (hdev->adv_data_len == len &&
1822 memcmp(cp.data, hdev->adv_data, len) == 0)
1823 return;
1824
1825 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1826 hdev->adv_data_len = len;
1827
1828 cp.length = len;
1829
1830 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1831 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001832}
1833
Johan Hedbergcab054a2015-11-30 11:21:45 +02001834int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001835{
1836 struct hci_request req;
1837
1838 hci_req_init(&req, hdev);
1839 __hci_req_update_adv_data(&req, instance);
1840
1841 return hci_req_run(&req, NULL);
1842}
1843
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301844static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1845 u16 opcode)
1846{
1847 BT_DBG("%s status %u", hdev->name, status);
1848}
1849
1850void hci_req_disable_address_resolution(struct hci_dev *hdev)
1851{
1852 struct hci_request req;
1853 __u8 enable = 0x00;
1854
1855 if (!use_ll_privacy(hdev) &&
1856 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1857 return;
1858
1859 hci_req_init(&req, hdev);
1860
1861 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1862
1863 hci_req_run(&req, enable_addr_resolution_complete);
1864}
1865
Johan Hedbergf2252572015-11-18 12:49:20 +02001866static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1867{
Howard Chung22fbcfc2020-11-11 15:02:19 +08001868 bt_dev_dbg(hdev, "status %u", status);
Johan Hedbergf2252572015-11-18 12:49:20 +02001869}
1870
1871void hci_req_reenable_advertising(struct hci_dev *hdev)
1872{
1873 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001874
1875 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001876 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001877 return;
1878
Johan Hedbergf2252572015-11-18 12:49:20 +02001879 hci_req_init(&req, hdev);
1880
Johan Hedbergcab054a2015-11-30 11:21:45 +02001881 if (hdev->cur_adv_instance) {
1882 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1883 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001884 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301885 if (ext_adv_capable(hdev)) {
1886 __hci_req_start_ext_adv(&req, 0x00);
1887 } else {
1888 __hci_req_update_adv_data(&req, 0x00);
1889 __hci_req_update_scan_rsp_data(&req, 0x00);
1890 __hci_req_enable_advertising(&req);
1891 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001892 }
1893
1894 hci_req_run(&req, adv_enable_complete);
1895}
1896
1897static void adv_timeout_expire(struct work_struct *work)
1898{
1899 struct hci_dev *hdev = container_of(work, struct hci_dev,
1900 adv_instance_expire.work);
1901
1902 struct hci_request req;
1903 u8 instance;
1904
Howard Chung22fbcfc2020-11-11 15:02:19 +08001905 bt_dev_dbg(hdev, "");
Johan Hedbergf2252572015-11-18 12:49:20 +02001906
1907 hci_dev_lock(hdev);
1908
1909 hdev->adv_instance_timeout = 0;
1910
Johan Hedbergcab054a2015-11-30 11:21:45 +02001911 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001912 if (instance == 0x00)
1913 goto unlock;
1914
1915 hci_req_init(&req, hdev);
1916
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001917 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001918
1919 if (list_empty(&hdev->adv_instances))
1920 __hci_req_disable_advertising(&req);
1921
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001922 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001923
1924unlock:
1925 hci_dev_unlock(hdev);
1926}
1927
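/* Re-arm passive scanning and advance the interleave state machine
 * between allowlist-only and unfiltered scanning. Invoked through
 * hci_req_sync() from the interleave scan work below.
 */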
Howard Chungc4f1f402020-11-26 12:22:21 +08001928static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1929 unsigned long opt)
1930{
1931 struct hci_dev *hdev = req->hdev;
1932 int ret = 0;
1933
1934 hci_dev_lock(hdev);
1935
1936 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1937 hci_req_add_le_scan_disable(req, false);
1938 hci_req_add_le_passive_scan(req);
1939
1940 switch (hdev->interleave_scan_state) {
1941 case INTERLEAVE_SCAN_ALLOWLIST:
1942 bt_dev_dbg(hdev, "next state: allowlist");
1943 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1944 break;
1945 case INTERLEAVE_SCAN_NO_FILTER:
1946 bt_dev_dbg(hdev, "next state: no filter");
1947 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1948 break;
1949 case INTERLEAVE_SCAN_NONE:
1950 BT_ERR("unexpected error");
1951 ret = -1;
1952 }
1953
1954 hci_dev_unlock(hdev);
1955
1956 return ret;
1957}
1958
1959static void interleave_scan_work(struct work_struct *work)
1960{
1961 struct hci_dev *hdev = container_of(work, struct hci_dev,
1962 interleave_scan.work);
1963 u8 status;
1964 unsigned long timeout;
1965
1966 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1967 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1968 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1969 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1970 } else {
1971 bt_dev_err(hdev, "unexpected error");
1972 return;
1973 }
1974
1975 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1976 HCI_CMD_TIMEOUT, &status);
1977
1978 /* Don't continue interleaving if it was canceled */
1979 if (is_interleave_scanning(hdev))
1980 queue_delayed_work(hdev->req_workqueue,
1981 &hdev->interleave_scan, timeout);
1982}
1983
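/* Select the own address type and, when needed, the random address to
 * use for advertising: an RPA when privacy is enabled (regenerated once
 * expired), a fresh NRPA when privacy is required without an RPA, and
 * the public address otherwise.
 */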
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301984int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1985 bool use_rpa, struct adv_info *adv_instance,
1986 u8 *own_addr_type, bdaddr_t *rand_addr)
1987{
1988 int err;
1989
1990 bacpy(rand_addr, BDADDR_ANY);
1991
1992 /* If privacy is enabled use a resolvable private address. If
1993 * current RPA has expired then generate a new one.
1994 */
1995 if (use_rpa) {
1996 int to;
1997
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301998		/* If the controller supports LL Privacy, use own address type
 1999		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
2000 */
2001 if (use_ll_privacy(hdev))
2002 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2003 else
2004 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302005
2006 if (adv_instance) {
2007 if (!adv_instance->rpa_expired &&
2008 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2009 return 0;
2010
2011 adv_instance->rpa_expired = false;
2012 } else {
2013 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2014 !bacmp(&hdev->random_addr, &hdev->rpa))
2015 return 0;
2016 }
2017
2018 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2019 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01002020 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302021 return err;
2022 }
2023
2024 bacpy(rand_addr, &hdev->rpa);
2025
2026 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2027 if (adv_instance)
2028 queue_delayed_work(hdev->workqueue,
2029 &adv_instance->rpa_expired_cb, to);
2030 else
2031 queue_delayed_work(hdev->workqueue,
2032 &hdev->rpa_expired, to);
2033
2034 return 0;
2035 }
2036
2037 /* In case of required privacy without resolvable private address,
 2038	 * use a non-resolvable private address. This is useful for
2039 * non-connectable advertising.
2040 */
2041 if (require_privacy) {
2042 bdaddr_t nrpa;
2043
2044 while (true) {
2045 /* The non-resolvable private address is generated
 2046			 * from six random bytes with the two most significant
2047 * bits cleared.
2048 */
2049 get_random_bytes(&nrpa, 6);
2050 nrpa.b[5] &= 0x3f;
2051
2052 /* The non-resolvable private address shall not be
2053 * equal to the public address.
2054 */
2055 if (bacmp(&hdev->bdaddr, &nrpa))
2056 break;
2057 }
2058
2059 *own_addr_type = ADDR_LE_DEV_RANDOM;
2060 bacpy(rand_addr, &nrpa);
2061
2062 return 0;
2063 }
2064
2065 /* No privacy so use a public address. */
2066 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2067
2068 return 0;
2069}
2070
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302071void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2072{
2073 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2074}
2075
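/* Build the LE Set Extended Advertising Parameters command for an
 * instance, selecting event properties, PHYs and own address type, and
 * queue a Set Advertising Set Random Address command when a new random
 * address has to be programmed.
 */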
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302076int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302077{
2078 struct hci_cp_le_set_ext_adv_params cp;
2079 struct hci_dev *hdev = req->hdev;
2080 bool connectable;
2081 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302082 bdaddr_t random_addr;
2083 u8 own_addr_type;
2084 int err;
2085 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302086 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302087
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302088 if (instance > 0) {
2089 adv_instance = hci_find_adv_instance(hdev, instance);
2090 if (!adv_instance)
2091 return -EINVAL;
2092 } else {
2093 adv_instance = NULL;
2094 }
2095
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302096 flags = get_adv_instance_flags(hdev, instance);
2097
2098 /* If the "connectable" instance flag was not set, then choose between
2099 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2100 */
2101 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2102 mgmt_get_connectable(hdev);
2103
Colin Ian King75edd1f2018-11-09 13:27:36 +00002104 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302105 return -EPERM;
2106
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302107 /* Set require_privacy to true only when non-connectable
2108 * advertising is used. In that case it is fine to use a
2109 * non-resolvable private address.
2110 */
2111 err = hci_get_random_address(hdev, !connectable,
2112 adv_use_rpa(hdev, flags), adv_instance,
2113 &own_addr_type, &random_addr);
2114 if (err < 0)
2115 return err;
2116
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302117 memset(&cp, 0, sizeof(cp));
2118
Alain Michaud5cbd3eb2020-06-22 13:30:28 +00002119	/* In the ext adv set param command the interval is a 3-octet field */
2120 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2121 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302122
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302123 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2124
2125 if (connectable) {
2126 if (secondary_adv)
2127 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2128 else
2129 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08002130 } else if (adv_instance_is_scannable(hdev, instance)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302131 if (secondary_adv)
2132 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2133 else
2134 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2135 } else {
2136 if (secondary_adv)
2137 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2138 else
2139 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2140 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302141
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302142 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302143 cp.channel_map = hdev->le_adv_channel_map;
2144 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002145 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302146
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302147 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2148 cp.primary_phy = HCI_ADV_PHY_1M;
2149 cp.secondary_phy = HCI_ADV_PHY_2M;
2150 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2151 cp.primary_phy = HCI_ADV_PHY_CODED;
2152 cp.secondary_phy = HCI_ADV_PHY_CODED;
2153 } else {
2154 /* In all other cases use 1M */
2155 cp.primary_phy = HCI_ADV_PHY_1M;
2156 cp.secondary_phy = HCI_ADV_PHY_1M;
2157 }
2158
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302159 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2160
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302161 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2162 bacmp(&random_addr, BDADDR_ANY)) {
2163 struct hci_cp_le_set_adv_set_rand_addr cp;
2164
 2165		/* Check if the random address needs to be updated */
2166 if (adv_instance) {
2167 if (!bacmp(&random_addr, &adv_instance->random_addr))
2168 return 0;
2169 } else {
2170 if (!bacmp(&random_addr, &hdev->random_addr))
2171 return 0;
2172 }
2173
2174 memset(&cp, 0, sizeof(cp));
2175
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07002176 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302177 bacpy(&cp.bdaddr, &random_addr);
2178
2179 hci_req_add(req,
2180 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2181 sizeof(cp), &cp);
2182 }
2183
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302184 return 0;
2185}
2186
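/* Enable extended advertising for a single advertising set. When the
 * instance has a duration configured it is passed to the controller
 * (in 10 ms units) so the controller can schedule the set itself.
 */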
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002187int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302188{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002189 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302190 struct hci_cp_le_set_ext_adv_enable *cp;
2191 struct hci_cp_ext_adv_set *adv_set;
2192 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002193 struct adv_info *adv_instance;
2194
2195 if (instance > 0) {
2196 adv_instance = hci_find_adv_instance(hdev, instance);
2197 if (!adv_instance)
2198 return -EINVAL;
2199 } else {
2200 adv_instance = NULL;
2201 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302202
2203 cp = (void *) data;
2204 adv_set = (void *) cp->data;
2205
2206 memset(cp, 0, sizeof(*cp));
2207
2208 cp->enable = 0x01;
2209 cp->num_of_sets = 0x01;
2210
2211 memset(adv_set, 0, sizeof(*adv_set));
2212
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002213 adv_set->handle = instance;
2214
2215 /* Set duration per instance since controller is responsible for
2216 * scheduling it.
2217 */
2218 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03002219 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002220
2221 /* Time = N * 10 ms */
2222 adv_set->duration = cpu_to_le16(duration / 10);
2223 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302224
2225 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2226 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2227 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002228
2229 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302230}
2231
Daniel Winkler37adf702020-07-14 14:16:00 -07002232int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2233{
2234 struct hci_dev *hdev = req->hdev;
2235 struct hci_cp_le_set_ext_adv_enable *cp;
2236 struct hci_cp_ext_adv_set *adv_set;
2237 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2238 u8 req_size;
2239
2240 /* If request specifies an instance that doesn't exist, fail */
2241 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2242 return -EINVAL;
2243
2244 memset(data, 0, sizeof(data));
2245
2246 cp = (void *)data;
2247 adv_set = (void *)cp->data;
2248
2249 /* Instance 0x00 indicates all advertising instances will be disabled */
2250 cp->num_of_sets = !!instance;
2251 cp->enable = 0x00;
2252
2253 adv_set->handle = instance;
2254
2255 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2256 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2257
2258 return 0;
2259}
2260
2261int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2262{
2263 struct hci_dev *hdev = req->hdev;
2264
2265 /* If request specifies an instance that doesn't exist, fail */
2266 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2267 return -EINVAL;
2268
2269 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2270
2271 return 0;
2272}
2273
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302274int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2275{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302276 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07002277 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302278 int err;
2279
Daniel Winkler37adf702020-07-14 14:16:00 -07002280 /* If instance isn't pending, the chip knows about it, and it's safe to
2281 * disable
2282 */
2283 if (adv_instance && !adv_instance->pending)
2284 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302285
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302286 err = __hci_req_setup_ext_adv_instance(req, instance);
2287 if (err < 0)
2288 return err;
2289
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302290 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002291 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302292
2293 return 0;
2294}
2295
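/* Activate an advertising instance and account for its remaining
 * lifetime. Legacy advertising relies on the adv_instance_expire delayed
 * work to enforce the timeout; extended advertising controllers handle
 * the duration themselves.
 */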
Johan Hedbergf2252572015-11-18 12:49:20 +02002296int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2297 bool force)
2298{
2299 struct hci_dev *hdev = req->hdev;
2300 struct adv_info *adv_instance = NULL;
2301 u16 timeout;
2302
2303 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002304 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002305 return -EPERM;
2306
2307 if (hdev->adv_instance_timeout)
2308 return -EBUSY;
2309
2310 adv_instance = hci_find_adv_instance(hdev, instance);
2311 if (!adv_instance)
2312 return -ENOENT;
2313
2314 /* A zero timeout means unlimited advertising. As long as there is
2315 * only one instance, duration should be ignored. We still set a timeout
2316 * in case further instances are being added later on.
2317 *
2318 * If the remaining lifetime of the instance is more than the duration
2319 * then the timeout corresponds to the duration, otherwise it will be
2320 * reduced to the remaining instance lifetime.
2321 */
2322 if (adv_instance->timeout == 0 ||
2323 adv_instance->duration <= adv_instance->remaining_time)
2324 timeout = adv_instance->duration;
2325 else
2326 timeout = adv_instance->remaining_time;
2327
2328 /* The remaining time is being reduced unless the instance is being
2329 * advertised without time limit.
2330 */
2331 if (adv_instance->timeout)
2332 adv_instance->remaining_time =
2333 adv_instance->remaining_time - timeout;
2334
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002335 /* Only use work for scheduling instances with legacy advertising */
2336 if (!ext_adv_capable(hdev)) {
2337 hdev->adv_instance_timeout = timeout;
2338 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002339 &hdev->adv_instance_expire,
2340 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002341 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002342
2343 /* If we're just re-scheduling the same instance again then do not
2344 * execute any HCI commands. This happens when a single instance is
2345 * being advertised.
2346 */
2347 if (!force && hdev->cur_adv_instance == instance &&
2348 hci_dev_test_flag(hdev, HCI_LE_ADV))
2349 return 0;
2350
2351 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302352 if (ext_adv_capable(hdev)) {
2353 __hci_req_start_ext_adv(req, instance);
2354 } else {
2355 __hci_req_update_adv_data(req, instance);
2356 __hci_req_update_scan_rsp_data(req, instance);
2357 __hci_req_enable_advertising(req);
2358 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002359
2360 return 0;
2361}
2362
Johan Hedbergf2252572015-11-18 12:49:20 +02002363/* For a single instance:
2364 * - force == true: The instance will be removed even when its remaining
2365 * lifetime is not zero.
2366 * - force == false: the instance will be deactivated but kept stored unless
2367 * the remaining lifetime is zero.
2368 *
2369 * For instance == 0x00:
2370 * - force == true: All instances will be removed regardless of their timeout
2371 * setting.
2372 * - force == false: Only instances that have a timeout will be removed.
2373 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002374void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2375 struct hci_request *req, u8 instance,
2376 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002377{
2378 struct adv_info *adv_instance, *n, *next_instance = NULL;
2379 int err;
2380 u8 rem_inst;
2381
2382 /* Cancel any timeout concerning the removed instance(s). */
2383 if (!instance || hdev->cur_adv_instance == instance)
2384 cancel_adv_timeout(hdev);
2385
2386 /* Get the next instance to advertise BEFORE we remove
2387 * the current one. This can be the same instance again
2388 * if there is only one instance.
2389 */
2390 if (instance && hdev->cur_adv_instance == instance)
2391 next_instance = hci_get_next_instance(hdev, instance);
2392
2393 if (instance == 0x00) {
2394 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2395 list) {
2396 if (!(force || adv_instance->timeout))
2397 continue;
2398
2399 rem_inst = adv_instance->instance;
2400 err = hci_remove_adv_instance(hdev, rem_inst);
2401 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002402 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002403 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002404 } else {
2405 adv_instance = hci_find_adv_instance(hdev, instance);
2406
2407 if (force || (adv_instance && adv_instance->timeout &&
2408 !adv_instance->remaining_time)) {
2409 /* Don't advertise a removed instance. */
2410 if (next_instance &&
2411 next_instance->instance == instance)
2412 next_instance = NULL;
2413
2414 err = hci_remove_adv_instance(hdev, instance);
2415 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002416 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002417 }
2418 }
2419
Johan Hedbergf2252572015-11-18 12:49:20 +02002420 if (!req || !hdev_is_powered(hdev) ||
2421 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2422 return;
2423
Daniel Winkler37adf702020-07-14 14:16:00 -07002424 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002425 __hci_req_schedule_adv_instance(req, next_instance->instance,
2426 false);
2427}
2428
Johan Hedberg0857dd32014-12-19 13:40:20 +02002429static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2430{
2431 struct hci_dev *hdev = req->hdev;
2432
2433 /* If we're advertising or initiating an LE connection we can't
2434 * go ahead and change the random address at this time. This is
2435 * because the eventual initiator address used for the
2436 * subsequently created connection will be undefined (some
2437 * controllers use the new address and others the one we had
2438 * when the operation started).
2439 *
2440 * In this kind of scenario skip the update and let the random
2441 * address be updated at the next cycle.
2442 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002443 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002444 hci_lookup_le_connect(hdev)) {
Howard Chung22fbcfc2020-11-11 15:02:19 +08002445 bt_dev_dbg(hdev, "Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002446 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002447 return;
2448 }
2449
2450 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2451}
2452
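/* Select the own address type for the request and, if a random address
 * is needed, queue the HCI command that programs it (RPA, NRPA or static
 * address depending on the configuration).
 */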
2453int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002454 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002455{
2456 struct hci_dev *hdev = req->hdev;
2457 int err;
2458
2459 /* If privacy is enabled use a resolvable private address. If
2460 * current RPA has expired or there is something else than
2461 * the current RPA in use, then generate a new one.
2462 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002463 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002464 int to;
2465
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302466		/* If the controller supports LL Privacy, use own address type
 2467		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
2468 */
2469 if (use_ll_privacy(hdev))
2470 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2471 else
2472 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002473
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002474 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002475 !bacmp(&hdev->random_addr, &hdev->rpa))
2476 return 0;
2477
2478 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2479 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002480 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002481 return err;
2482 }
2483
2484 set_random_addr(req, &hdev->rpa);
2485
2486 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2487 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2488
2489 return 0;
2490 }
2491
2492 /* In case of required privacy without resolvable private address,
 2493	 * use a non-resolvable private address. This is useful for active
2494 * scanning and non-connectable advertising.
2495 */
2496 if (require_privacy) {
2497 bdaddr_t nrpa;
2498
2499 while (true) {
2500 /* The non-resolvable private address is generated
 2501			 * from six random bytes with the two most significant
2502 * bits cleared.
2503 */
2504 get_random_bytes(&nrpa, 6);
2505 nrpa.b[5] &= 0x3f;
2506
2507 /* The non-resolvable private address shall not be
2508 * equal to the public address.
2509 */
2510 if (bacmp(&hdev->bdaddr, &nrpa))
2511 break;
2512 }
2513
2514 *own_addr_type = ADDR_LE_DEV_RANDOM;
2515 set_random_addr(req, &nrpa);
2516 return 0;
2517 }
2518
 2519	/* If forcing static address is in use or there is no public
 2520	 * address, use the static address as random address (but skip
 2521	 * the HCI command if the current random address is already the
 2522	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002523 *
2524 * In case BR/EDR has been disabled on a dual-mode controller
2525 * and a static address has been configured, then use that
2526 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002527 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002528 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002529 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002530 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002531 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002532 *own_addr_type = ADDR_LE_DEV_RANDOM;
2533 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2534 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2535 &hdev->static_addr);
2536 return 0;
2537 }
2538
2539 /* Neither privacy nor static address is being used so use a
2540 * public address.
2541 */
2542 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2543
2544 return 0;
2545}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002546
Johan Hedberg405a2612014-12-19 23:18:22 +02002547static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2548{
2549 struct bdaddr_list *b;
2550
2551 list_for_each_entry(b, &hdev->whitelist, list) {
2552 struct hci_conn *conn;
2553
2554 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2555 if (!conn)
2556 return true;
2557
2558 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2559 return true;
2560 }
2561
2562 return false;
2563}
2564
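/* Recompute the BR/EDR page and inquiry scan setting from the
 * connectable and discoverable state (and any disconnected whitelist
 * entries) and queue Write Scan Enable if the setting changed.
 */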
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002565void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002566{
2567 struct hci_dev *hdev = req->hdev;
2568 u8 scan;
2569
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002570 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002571 return;
2572
2573 if (!hdev_is_powered(hdev))
2574 return;
2575
2576 if (mgmt_powering_down(hdev))
2577 return;
2578
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002579 if (hdev->scanning_paused)
2580 return;
2581
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002582 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002583 disconnected_whitelist_entries(hdev))
2584 scan = SCAN_PAGE;
2585 else
2586 scan = SCAN_DISABLED;
2587
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002588 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002589 scan |= SCAN_INQUIRY;
2590
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002591 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2592 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2593 return;
2594
Johan Hedberg405a2612014-12-19 23:18:22 +02002595 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2596}
2597
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002598static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002599{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002600 hci_dev_lock(req->hdev);
2601 __hci_req_update_scan(req);
2602 hci_dev_unlock(req->hdev);
2603 return 0;
2604}
Johan Hedberg405a2612014-12-19 23:18:22 +02002605
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002606static void scan_update_work(struct work_struct *work)
2607{
2608 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2609
2610 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002611}
2612
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002613static int connectable_update(struct hci_request *req, unsigned long opt)
2614{
2615 struct hci_dev *hdev = req->hdev;
2616
2617 hci_dev_lock(hdev);
2618
2619 __hci_req_update_scan(req);
2620
2621 /* If BR/EDR is not enabled and we disable advertising as a
2622 * by-product of disabling connectable, we need to update the
2623 * advertising flags.
2624 */
2625 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002626 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002627
2628 /* Update the advertising parameters if necessary */
2629 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302630 !list_empty(&hdev->adv_instances)) {
2631 if (ext_adv_capable(hdev))
2632 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2633 else
2634 __hci_req_enable_advertising(req);
2635 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002636
2637 __hci_update_background_scan(req);
2638
2639 hci_dev_unlock(hdev);
2640
2641 return 0;
2642}
2643
2644static void connectable_update_work(struct work_struct *work)
2645{
2646 struct hci_dev *hdev = container_of(work, struct hci_dev,
2647 connectable_update);
2648 u8 status;
2649
2650 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2651 mgmt_set_connectable_complete(hdev, status);
2652}
2653
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002654static u8 get_service_classes(struct hci_dev *hdev)
2655{
2656 struct bt_uuid *uuid;
2657 u8 val = 0;
2658
2659 list_for_each_entry(uuid, &hdev->uuids, list)
2660 val |= uuid->svc_hint;
2661
2662 return val;
2663}
2664
2665void __hci_req_update_class(struct hci_request *req)
2666{
2667 struct hci_dev *hdev = req->hdev;
2668 u8 cod[3];
2669
Howard Chung22fbcfc2020-11-11 15:02:19 +08002670 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002671
2672 if (!hdev_is_powered(hdev))
2673 return;
2674
2675 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2676 return;
2677
2678 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2679 return;
2680
2681 cod[0] = hdev->minor_class;
2682 cod[1] = hdev->major_class;
2683 cod[2] = get_service_classes(hdev);
2684
2685 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2686 cod[1] |= 0x20;
2687
2688 if (memcmp(cod, hdev->dev_class, 3) == 0)
2689 return;
2690
2691 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2692}
2693
Johan Hedbergaed1a882015-11-22 17:24:44 +03002694static void write_iac(struct hci_request *req)
2695{
2696 struct hci_dev *hdev = req->hdev;
2697 struct hci_cp_write_current_iac_lap cp;
2698
2699 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2700 return;
2701
2702 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2703 /* Limited discoverable mode */
2704 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2705 cp.iac_lap[0] = 0x00; /* LIAC */
2706 cp.iac_lap[1] = 0x8b;
2707 cp.iac_lap[2] = 0x9e;
2708 cp.iac_lap[3] = 0x33; /* GIAC */
2709 cp.iac_lap[4] = 0x8b;
2710 cp.iac_lap[5] = 0x9e;
2711 } else {
2712 /* General discoverable mode */
2713 cp.num_iac = 1;
2714 cp.iac_lap[0] = 0x33; /* GIAC */
2715 cp.iac_lap[1] = 0x8b;
2716 cp.iac_lap[2] = 0x9e;
2717 }
2718
2719 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2720 (cp.num_iac * 3) + 1, &cp);
2721}
2722
2723static int discoverable_update(struct hci_request *req, unsigned long opt)
2724{
2725 struct hci_dev *hdev = req->hdev;
2726
2727 hci_dev_lock(hdev);
2728
2729 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2730 write_iac(req);
2731 __hci_req_update_scan(req);
2732 __hci_req_update_class(req);
2733 }
2734
2735 /* Advertising instances don't use the global discoverable setting, so
2736 * only update AD if advertising was enabled using Set Advertising.
2737 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002738 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002739 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002740
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002741 /* Discoverable mode affects the local advertising
2742 * address in limited privacy mode.
2743 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302744 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2745 if (ext_adv_capable(hdev))
2746 __hci_req_start_ext_adv(req, 0x00);
2747 else
2748 __hci_req_enable_advertising(req);
2749 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002750 }
2751
Johan Hedbergaed1a882015-11-22 17:24:44 +03002752 hci_dev_unlock(hdev);
2753
2754 return 0;
2755}
2756
2757static void discoverable_update_work(struct work_struct *work)
2758{
2759 struct hci_dev *hdev = container_of(work, struct hci_dev,
2760 discoverable_update);
2761 u8 status;
2762
2763 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2764 mgmt_set_discoverable_complete(hdev, status);
2765}
2766
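/* Queue the HCI command appropriate for tearing down a connection in its
 * current state: disconnect (or disconnect the physical link for AMP),
 * cancel the pending create connection, or reject the incoming request.
 */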
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002767void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2768 u8 reason)
2769{
2770 switch (conn->state) {
2771 case BT_CONNECTED:
2772 case BT_CONFIG:
2773 if (conn->type == AMP_LINK) {
2774 struct hci_cp_disconn_phy_link cp;
2775
2776 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2777 cp.reason = reason;
2778 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2779 &cp);
2780 } else {
2781 struct hci_cp_disconnect dc;
2782
2783 dc.handle = cpu_to_le16(conn->handle);
2784 dc.reason = reason;
2785 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2786 }
2787
2788 conn->state = BT_DISCONN;
2789
2790 break;
2791 case BT_CONNECT:
2792 if (conn->type == LE_LINK) {
2793 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2794 break;
2795 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2796 0, NULL);
2797 } else if (conn->type == ACL_LINK) {
2798 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2799 break;
2800 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2801 6, &conn->dst);
2802 }
2803 break;
2804 case BT_CONNECT2:
2805 if (conn->type == ACL_LINK) {
2806 struct hci_cp_reject_conn_req rej;
2807
2808 bacpy(&rej.bdaddr, &conn->dst);
2809 rej.reason = reason;
2810
2811 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2812 sizeof(rej), &rej);
2813 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2814 struct hci_cp_reject_sync_conn_req rej;
2815
2816 bacpy(&rej.bdaddr, &conn->dst);
2817
2818 /* SCO rejection has its own limited set of
2819 * allowed error values (0x0D-0x0F) which isn't
2820 * compatible with most values passed to this
 2821			 * function. To be safe, hard-code one of the
2822 * values that's suitable for SCO.
2823 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002824 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002825
2826 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2827 sizeof(rej), &rej);
2828 }
2829 break;
2830 default:
2831 conn->state = BT_CLOSED;
2832 break;
2833 }
2834}
2835
2836static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2837{
2838 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002839 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002840}
2841
2842int hci_abort_conn(struct hci_conn *conn, u8 reason)
2843{
2844 struct hci_request req;
2845 int err;
2846
2847 hci_req_init(&req, conn->hdev);
2848
2849 __hci_abort_conn(&req, conn, reason);
2850
2851 err = hci_req_run(&req, abort_conn_complete);
2852 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002853 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002854 return err;
2855 }
2856
2857 return 0;
2858}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002859
Johan Hedberga1d01db2015-11-11 08:11:25 +02002860static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002861{
2862 hci_dev_lock(req->hdev);
2863 __hci_update_background_scan(req);
2864 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002865 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002866}
2867
2868static void bg_scan_update(struct work_struct *work)
2869{
2870 struct hci_dev *hdev = container_of(work, struct hci_dev,
2871 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002872 struct hci_conn *conn;
2873 u8 status;
2874 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002875
Johan Hedberg84235d22015-11-11 08:11:20 +02002876 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2877 if (!err)
2878 return;
2879
2880 hci_dev_lock(hdev);
2881
2882 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2883 if (conn)
2884 hci_le_conn_failed(conn, status);
2885
2886 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002887}
2888
Johan Hedberga1d01db2015-11-11 08:11:25 +02002889static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002890{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302891 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002892 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002893}
2894
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002895static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2896{
2897 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002898 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2899 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002900 struct hci_cp_inquiry cp;
2901
Howard Chung22fbcfc2020-11-11 15:02:19 +08002902 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002903
2904 hci_dev_lock(req->hdev);
2905 hci_inquiry_cache_flush(req->hdev);
2906 hci_dev_unlock(req->hdev);
2907
2908 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002909
2910 if (req->hdev->discovery.limited)
2911 memcpy(&cp.lap, liac, sizeof(cp.lap));
2912 else
2913 memcpy(&cp.lap, giac, sizeof(cp.lap));
2914
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002915 cp.length = length;
2916
2917 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2918
2919 return 0;
2920}
2921
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002922static void le_scan_disable_work(struct work_struct *work)
2923{
2924 struct hci_dev *hdev = container_of(work, struct hci_dev,
2925 le_scan_disable.work);
2926 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002927
Howard Chung22fbcfc2020-11-11 15:02:19 +08002928 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002929
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002930 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002931 return;
2932
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002933 cancel_delayed_work(&hdev->le_scan_restart);
2934
2935 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2936 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002937 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2938 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002939 return;
2940 }
2941
2942 hdev->discovery.scan_start = 0;
2943
2944 /* If we were running LE only scan, change discovery state. If
2945 * we were running both LE and BR/EDR inquiry simultaneously,
2946 * and BR/EDR inquiry is already finished, stop discovery,
2947 * otherwise BR/EDR inquiry will stop discovery when finished.
2948	 * If we are resolving a remote device name, do not change the
2949	 * discovery state.
2950 */
2951
2952 if (hdev->discovery.type == DISCOV_TYPE_LE)
2953 goto discov_stopped;
2954
2955 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2956 return;
2957
2958 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2959 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2960 hdev->discovery.state != DISCOVERY_RESOLVING)
2961 goto discov_stopped;
2962
2963 return;
2964 }
2965
2966 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2967 HCI_CMD_TIMEOUT, &status);
2968 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002969 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002970 goto discov_stopped;
2971 }
2972
2973 return;
2974
2975discov_stopped:
2976 hci_dev_lock(hdev);
2977 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2978 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002979}
2980
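/* Request builder that restarts an already running LE scan by queueing
 * a scan disable followed by a scan enable, using the extended or
 * legacy commands depending on controller support. Does nothing while
 * scanning is paused for suspend.
 */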
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002981static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002982{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002983 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002984
2985 /* If controller is not scanning we are done. */
2986 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2987 return 0;
2988
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002989 if (hdev->scanning_paused) {
2990 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2991 return 0;
2992 }
2993
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302994 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002995
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302996 if (use_ext_scan(hdev)) {
2997 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2998
2999 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3000 ext_enable_cp.enable = LE_SCAN_ENABLE;
3001 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3002
3003 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3004 sizeof(ext_enable_cp), &ext_enable_cp);
3005 } else {
3006 struct hci_cp_le_set_scan_enable cp;
3007
3008 memset(&cp, 0, sizeof(cp));
3009 cp.enable = LE_SCAN_ENABLE;
3010 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3011 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3012 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003013
3014 return 0;
3015}
3016
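/* Work callback for hdev->le_scan_restart: run le_scan_restart() and,
 * on controllers with the strict duplicate filter quirk, re-arm the
 * le_scan_disable work with whatever is left of the discovery window.
 */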
3017static void le_scan_restart_work(struct work_struct *work)
3018{
3019 struct hci_dev *hdev = container_of(work, struct hci_dev,
3020 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003021 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003022 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003023
Howard Chung22fbcfc2020-11-11 15:02:19 +08003024 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003025
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003026 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003027 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01003028 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3029 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003030 return;
3031 }
3032
3033 hci_dev_lock(hdev);
3034
3035 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3036 !hdev->discovery.scan_start)
3037 goto unlock;
3038
3039	/* When the scan was started, the le_scan_disable work was queued to
3040	 * run 'duration' jiffies after scan_start. That work was canceled
3041	 * for this restart, so queue it again with the remaining timeout to
3042	 * make sure that the scan does not run indefinitely.
3043 */
3044 duration = hdev->discovery.scan_duration;
3045 scan_start = hdev->discovery.scan_start;
3046 now = jiffies;
3047 if (now - scan_start <= duration) {
3048 int elapsed;
3049
3050 if (now >= scan_start)
3051 elapsed = now - scan_start;
3052 else
3053 elapsed = ULONG_MAX - scan_start + now;
3054
3055 timeout = duration - elapsed;
3056 } else {
3057 timeout = 0;
3058 }
3059
3060 queue_delayed_work(hdev->req_workqueue,
3061 &hdev->le_scan_disable, timeout);
3062
3063unlock:
3064 hci_dev_unlock(hdev);
3065}
3066
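/* Request builder for the active LE scan used by discovery: pause any
 * running background scan, pick a resolvable or non-resolvable private
 * address as needed, and start scanning with the interval passed in
 * 'opt'.
 */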
Johan Hedberge68f0722015-11-11 08:30:30 +02003067static int active_scan(struct hci_request *req, unsigned long opt)
3068{
3069 uint16_t interval = opt;
3070 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02003071 u8 own_addr_type;
Marcel Holtmann849c9c32020-04-09 08:05:48 +02003072 /* White list is not used for discovery */
3073 u8 filter_policy = 0x00;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05303074 /* Discovery doesn't require controller address resolution */
3075 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02003076 int err;
3077
Howard Chung22fbcfc2020-11-11 15:02:19 +08003078 bt_dev_dbg(hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02003079
Johan Hedberge68f0722015-11-11 08:30:30 +02003080 /* If controller is scanning, it means the background scanning is
3081 * running. Thus, we should temporarily stop it in order to set the
3082 * discovery scanning parameters.
3083 */
3084 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303085 hci_req_add_le_scan_disable(req, false);
Johan Hedberge68f0722015-11-11 08:30:30 +02003086
3087 /* All active scans will be done with either a resolvable private
3088 * address (when privacy feature has been enabled) or non-resolvable
3089 * private address.
3090 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02003091 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3092 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02003093 if (err < 0)
3094 own_addr_type = ADDR_LE_DEV_PUBLIC;
3095
Alain Michaudd4edda02020-06-29 17:04:15 +00003096 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3097 hdev->le_scan_window_discovery, own_addr_type,
Marcel Holtmanne1d57232020-07-23 18:08:57 +05303098 filter_policy, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02003099 return 0;
3100}
3101
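/* Request builder for interleaved discovery: start the active LE scan
 * and queue a BR/EDR inquiry in the same request.
 */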
3102static int interleaved_discov(struct hci_request *req, unsigned long opt)
3103{
3104 int err;
3105
Howard Chung22fbcfc2020-11-11 15:02:19 +08003106 bt_dev_dbg(req->hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02003107
3108 err = active_scan(req, opt);
3109 if (err)
3110 return err;
3111
Johan Hedberg7df26b52015-11-11 12:24:21 +02003112 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02003113}
3114
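/* Kick off discovery according to hdev->discovery.type and report the
 * result through 'status'. For LE and interleaved discovery the
 * le_scan_disable delayed work is armed with the chosen timeout.
 */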
3115static void start_discovery(struct hci_dev *hdev, u8 *status)
3116{
3117 unsigned long timeout;
3118
Howard Chung22fbcfc2020-11-11 15:02:19 +08003119 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
Johan Hedberge68f0722015-11-11 08:30:30 +02003120
3121 switch (hdev->discovery.type) {
3122 case DISCOV_TYPE_BREDR:
3123 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02003124 hci_req_sync(hdev, bredr_inquiry,
3125 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02003126 status);
3127 return;
3128 case DISCOV_TYPE_INTERLEAVED:
3129 /* When running simultaneous discovery, the LE scanning time
3130	 * should occupy the whole discovery time since BR/EDR inquiry
3131 * and LE scanning are scheduled by the controller.
3132 *
3133 * For interleaving discovery in comparison, BR/EDR inquiry
3134 * and LE scanning are done sequentially with separate
3135 * timeouts.
3136 */
3137 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3138 &hdev->quirks)) {
3139 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3140 /* During simultaneous discovery, we double LE scan
3141 * interval. We must leave some time for the controller
3142 * to do BR/EDR inquiry.
3143 */
3144 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00003145 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02003146 status);
3147 break;
3148 }
3149
3150 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00003151 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02003152 HCI_CMD_TIMEOUT, status);
3153 break;
3154 case DISCOV_TYPE_LE:
3155 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00003156 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02003157 HCI_CMD_TIMEOUT, status);
3158 break;
3159 default:
3160 *status = HCI_ERROR_UNSPECIFIED;
3161 return;
3162 }
3163
3164 if (*status)
3165 return;
3166
Howard Chung22fbcfc2020-11-11 15:02:19 +08003167 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
Johan Hedberge68f0722015-11-11 08:30:30 +02003168
3169 /* When service discovery is used and the controller has a
3170 * strict duplicate filter, it is important to remember the
3171 * start and duration of the scan. This is required for
3172 * restarting scanning during the discovery phase.
3173 */
3174 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3175 hdev->discovery.result_filtering) {
3176 hdev->discovery.scan_start = jiffies;
3177 hdev->discovery.scan_duration = timeout;
3178 }
3179
3180 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3181 timeout);
3182}
3183
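/* Queue the commands needed to stop the current discovery procedure:
 * cancel an ongoing inquiry and/or LE scan and, for BR/EDR, a pending
 * remote name request. Returns true if any command was queued.
 */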
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003184bool hci_req_stop_discovery(struct hci_request *req)
3185{
3186 struct hci_dev *hdev = req->hdev;
3187 struct discovery_state *d = &hdev->discovery;
3188 struct hci_cp_remote_name_req_cancel cp;
3189 struct inquiry_entry *e;
3190 bool ret = false;
3191
Howard Chung22fbcfc2020-11-11 15:02:19 +08003192 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003193
3194 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3195 if (test_bit(HCI_INQUIRY, &hdev->flags))
3196 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3197
3198 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3199 cancel_delayed_work(&hdev->le_scan_disable);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303200 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003201 }
3202
3203 ret = true;
3204 } else {
3205 /* Passive scanning */
3206 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303207 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003208 ret = true;
3209 }
3210 }
3211
3212 /* No further actions needed for LE-only discovery */
3213 if (d->type == DISCOV_TYPE_LE)
3214 return ret;
3215
3216 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3217 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3218 NAME_PENDING);
3219 if (!e)
3220 return ret;
3221
3222 bacpy(&cp.bdaddr, &e->data.bdaddr);
3223 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3224 &cp);
3225 ret = true;
3226 }
3227
3228 return ret;
3229}
3230
3231static int stop_discovery(struct hci_request *req, unsigned long opt)
3232{
3233 hci_dev_lock(req->hdev);
3234 hci_req_stop_discovery(req);
3235 hci_dev_unlock(req->hdev);
3236
3237 return 0;
3238}
3239
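/* Work callback for hdev->discov_update: drive the discovery state
 * machine by starting or stopping discovery and reporting the result
 * back to the management interface.
 */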
Johan Hedberge68f0722015-11-11 08:30:30 +02003240static void discov_update(struct work_struct *work)
3241{
3242 struct hci_dev *hdev = container_of(work, struct hci_dev,
3243 discov_update);
3244 u8 status = 0;
3245
3246 switch (hdev->discovery.state) {
3247 case DISCOVERY_STARTING:
3248 start_discovery(hdev, &status);
3249 mgmt_start_discovery_complete(hdev, status);
3250 if (status)
3251 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3252 else
3253 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3254 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003255 case DISCOVERY_STOPPING:
3256 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3257 mgmt_stop_discovery_complete(hdev, status);
3258 if (!status)
3259 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3260 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02003261 case DISCOVERY_STOPPED:
3262 default:
3263 return;
3264 }
3265}
3266
Johan Hedbergc366f552015-11-23 15:43:06 +02003267static void discov_off(struct work_struct *work)
3268{
3269 struct hci_dev *hdev = container_of(work, struct hci_dev,
3270 discov_off.work);
3271
Howard Chung22fbcfc2020-11-11 15:02:19 +08003272 bt_dev_dbg(hdev, "");
Johan Hedbergc366f552015-11-23 15:43:06 +02003273
3274 hci_dev_lock(hdev);
3275
3276 /* When discoverable timeout triggers, then just make sure
3277 * the limited discoverable flag is cleared. Even in the case
3278 * of a timeout triggered from general discoverable, it is
3279 * safe to unconditionally clear the flag.
3280 */
3281 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3282 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3283 hdev->discov_timeout = 0;
3284
3285 hci_dev_unlock(hdev);
3286
3287 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3288 mgmt_new_settings(hdev);
3289}
3290
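/* Request builder run when the controller is powered on: sync the host
 * configuration (SSP, secure connections, LE host support), restore
 * advertising data and instances, and update authentication, scan
 * mode, class, name and EIR for BR/EDR.
 */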
Johan Hedberg2ff13892015-11-25 16:15:44 +02003291static int powered_update_hci(struct hci_request *req, unsigned long opt)
3292{
3293 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02003294 u8 link_sec;
3295
3296 hci_dev_lock(hdev);
3297
3298 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3299 !lmp_host_ssp_capable(hdev)) {
3300 u8 mode = 0x01;
3301
3302 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3303
3304 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3305 u8 support = 0x01;
3306
3307 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3308 sizeof(support), &support);
3309 }
3310 }
3311
3312 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3313 lmp_bredr_capable(hdev)) {
3314 struct hci_cp_write_le_host_supported cp;
3315
3316 cp.le = 0x01;
3317 cp.simul = 0x00;
3318
3319 /* Check first if we already have the right
3320 * host state (host features set)
3321 */
3322 if (cp.le != lmp_host_le_capable(hdev) ||
3323 cp.simul != lmp_host_le_br_capable(hdev))
3324 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3325 sizeof(cp), &cp);
3326 }
3327
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003328 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02003329 /* Make sure the controller has a good default for
3330 * advertising data. This also applies to the case
3331 * where BR/EDR was toggled during the AUTO_OFF phase.
3332 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003333 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3334 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303335 int err;
3336
3337 if (ext_adv_capable(hdev)) {
3338 err = __hci_req_setup_ext_adv_instance(req,
3339 0x00);
3340 if (!err)
3341 __hci_req_update_scan_rsp_data(req,
3342 0x00);
3343 } else {
3344 err = 0;
3345 __hci_req_update_adv_data(req, 0x00);
3346 __hci_req_update_scan_rsp_data(req, 0x00);
3347 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003348
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303349 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303350 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303351 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303352 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03003353 __hci_req_enable_ext_advertising(req,
3354 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303355 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003356 } else if (!list_empty(&hdev->adv_instances)) {
3357 struct adv_info *adv_instance;
3358
Johan Hedberg2ff13892015-11-25 16:15:44 +02003359 adv_instance = list_first_entry(&hdev->adv_instances,
3360 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02003361 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003362 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02003363 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003364 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003365 }
3366
3367 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3368 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3369 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3370 sizeof(link_sec), &link_sec);
3371
3372 if (lmp_bredr_capable(hdev)) {
3373 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3374 __hci_req_write_fast_connectable(req, true);
3375 else
3376 __hci_req_write_fast_connectable(req, false);
3377 __hci_req_update_scan(req);
3378 __hci_req_update_class(req);
3379 __hci_req_update_name(req);
3380 __hci_req_update_eir(req);
3381 }
3382
3383 hci_dev_unlock(hdev);
3384 return 0;
3385}
3386
3387int __hci_req_hci_power_on(struct hci_dev *hdev)
3388{
3389 /* Register the available SMP channels (BR/EDR and LE) only when
3390 * successfully powering on the controller. This late
3391 * registration is required so that LE SMP can clearly decide if
3392 * the public address or static address is used.
3393 */
3394 smp_register(hdev);
3395
3396 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3397 NULL);
3398}
3399
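/* The work items initialized below follow the same pattern as the
 * workers above: build an HCI request and drive it synchronously. A
 * minimal sketch of that pattern, mirroring le_scan_disable_work()
 * and using a hypothetical builder my_req_builder purely for
 * illustration:
 *
 *	u8 status;
 *
 *	hci_req_sync(hdev, my_req_builder, 0, HCI_CMD_TIMEOUT, &status);
 *	if (status)
 *		bt_dev_err(hdev, "request failed: status 0x%02x", status);
 */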
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003400void hci_request_setup(struct hci_dev *hdev)
3401{
Johan Hedberge68f0722015-11-11 08:30:30 +02003402 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003403 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003404 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003405 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003406 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02003407 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003408 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3409 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02003410 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Howard Chungc4f1f402020-11-26 12:22:21 +08003411 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003412}
3413
3414void hci_request_cancel_all(struct hci_dev *hdev)
3415{
Johan Hedberg7df0f732015-11-12 15:15:00 +02003416 hci_req_sync_cancel(hdev, ENODEV);
3417
Johan Hedberge68f0722015-11-11 08:30:30 +02003418 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003419 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003420 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003421 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003422 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02003423 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003424 cancel_delayed_work_sync(&hdev->le_scan_disable);
3425 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02003426
3427 if (hdev->adv_instance_timeout) {
3428 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3429 hdev->adv_instance_timeout = 0;
3430 }
Howard Chungc4f1f402020-11-26 12:22:21 +08003431
3432 cancel_interleave_scan(hdev);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003433}