/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

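/* Splice the request's command queue onto hdev->cmd_q and schedule the
 * command work. The completion callback (plain or skb-based) is attached
 * to the last command in the request.
 */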
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

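/* Completion callback for synchronous requests: record the result (and the
 * optional response skb) and wake up the waiter in __hci_req_sync().
 */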
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close() when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

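/* Allocate and build an skb carrying an HCI command header plus parameters. */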
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

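/* Queue page scan activity/type updates to toggle fast connectable mode on
 * BR/EDR capable controllers.
 */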
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Returns true if an interleave scan was started as a result of this call,
 * otherwise returns false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to be
		 * scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

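/* Queue commands to disable LE scanning (and, when applicable, controller
 * based address resolution), unless scanning is paused for suspend.
 */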
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

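/* Queue commands to remove a device from the LE accept list and, when LL
 * privacy is in use, from the resolving list as well.
 */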
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

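/* Rebuild the LE accept list from the pending connection and report lists.
 * Returns the scan filter policy to use: 0x01 when the accept list should
 * be used, 0x00 otherwise.
 */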
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of the accept list even with RPAs in suspend. In the
	 * worst case, we won't be able to wake from devices that use the
	 * privacy 1.2 features. Additionally, once we support privacy 1.2 and
	 * IRK offloading, we can update this to also check for those
	 * conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all accept list entries that are no longer valid have been
	 * removed, walk through the list of pending connections and ensure
	 * that any new device gets programmed into the controller.
	 *
	 * If the list of the devices is larger than the list of available
	 * accept list entries in the controller, then just abort and return
	 * a filter policy value to not use the accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

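/* Queue the scan parameter and scan enable commands, using the extended
 * scanning variants when the controller supports them.
 */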
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
		memset(&f, 0, sizeof(f));
		f.flt_type = HCI_FLT_CLEAR_ALL;
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
	}
}

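/* Program connection-setup event filters for devices marked as wake capable
 * and enable or disable page scanning accordingly (suspend path).
 */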
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan && !scanning) {
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	} else if (!scan && scanning) {
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}

static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
						bool suspending)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		if (suspending)
			msft_suspend(hdev);
		else
			msft_resume(hdev);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && suspending)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

1062void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1063{
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001064 int old_state;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001065 struct hci_conn *conn;
1066 struct hci_request req;
1067 u8 page_scan;
1068 int disconnect_counter;
1069
Abhishek Pandit-Subedi9952d902020-03-11 08:54:00 -07001070 if (next == hdev->suspend_state) {
1071 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1072 goto done;
1073 }
1074
1075 hdev->suspend_state = next;
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001076 hci_req_init(&req, hdev);
1077
1078 if (next == BT_SUSPEND_DISCONNECT) {
1079 /* Mark device as suspended */
1080 hdev->suspended = true;
1081
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001082 /* Pause discovery if not already stopped */
1083 old_state = hdev->discovery.state;
1084 if (old_state != DISCOVERY_STOPPED) {
1085 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1086 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1087 queue_work(hdev->req_workqueue, &hdev->discov_update);
1088 }
1089
1090 hdev->discovery_paused = true;
1091 hdev->discovery_old_state = old_state;
1092
Daniel Winkler53274472020-09-15 14:14:27 -07001093 /* Stop directed advertising */
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001094 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1095 if (old_state) {
1096 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1097 cancel_delayed_work(&hdev->discov_off);
1098 queue_delayed_work(hdev->req_workqueue,
1099 &hdev->discov_off, 0);
1100 }
1101
Daniel Winkler53274472020-09-15 14:14:27 -07001102 /* Pause other advertisements */
1103 if (hdev->adv_instance_cnt)
Daniel Winkler2943d8e2020-11-06 15:20:19 -08001104 __hci_req_pause_adv_instances(&req);
Daniel Winkler53274472020-09-15 14:14:27 -07001105
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001106 hdev->advertising_paused = true;
1107 hdev->advertising_old_state = old_state;
Abhishek Pandit-Subedie5b0ad62021-03-03 08:34:04 -08001108
1109 /* Disable page scan if enabled */
1110 if (test_bit(HCI_PSCAN, &hdev->flags)) {
1111 page_scan = SCAN_DISABLED;
1112 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1113 &page_scan);
1114 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1115 }
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001116
Manish Mandlik6fb00d42020-06-01 18:42:51 -07001117 /* Disable LE passive scan if enabled */
Howard Chung36afe872020-11-26 12:22:22 +08001118 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1119 cancel_interleave_scan(hdev);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301120 hci_req_add_le_scan_disable(&req, false);
Howard Chung36afe872020-11-26 12:22:22 +08001121 }
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001122
Howard Chungbf6a4e32021-01-22 16:36:17 +08001123 /* Disable advertisement filters */
Manish Mandlikce818432021-09-21 14:47:10 -07001124 hci_req_prepare_adv_monitor_suspend(&req, true);
Howard Chungbf6a4e32021-01-22 16:36:17 +08001125
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001126 /* Prevent disconnects from causing scanning to be re-enabled */
1127 hdev->scanning_paused = true;
1128
1129 /* Run commands before disconnecting */
1130 hci_req_run(&req, suspend_req_complete);
1131
1132 disconnect_counter = 0;
1133 /* Soft disconnect everything (power off) */
1134 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1135 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1136 disconnect_counter++;
1137 }
1138
1139 if (disconnect_counter > 0) {
1140 bt_dev_dbg(hdev,
1141 "Had %d disconnects. Will wait on them",
1142 disconnect_counter);
1143 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1144 }
Abhishek Pandit-Subedi0d2c9822020-05-12 19:19:25 -07001145 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001146 /* Unpause to take care of updating scanning params */
1147 hdev->scanning_paused = false;
1148 /* Enable event filter for paired devices */
1149 hci_req_set_event_filter(&req);
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001150 /* Enable passive scan at lower duty cycle */
Abhishek Pandit-Subedi295fa2a2020-12-07 16:12:54 -08001151 __hci_update_background_scan(&req);
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001152 /* Pause scan changes again. */
1153 hdev->scanning_paused = true;
1154 hci_req_run(&req, suspend_req_complete);
1155 } else {
1156 hdev->suspended = false;
1157 hdev->scanning_paused = false;
1158
Abhishek Pandit-Subedie5b0ad62021-03-03 08:34:04 -08001159 /* Clear any event filters and restore scan state */
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001160 hci_req_clear_event_filter(&req);
Abhishek Pandit-Subedie5b0ad62021-03-03 08:34:04 -08001161 __hci_req_update_scan(&req);
1162
Abhishek Pandit-Subedidd522a72020-03-11 08:54:02 -07001163 /* Reset passive/background scanning to normal */
Abhishek Pandit-Subedi295fa2a2020-12-07 16:12:54 -08001164 __hci_update_background_scan(&req);
Howard Chungbf6a4e32021-01-22 16:36:17 +08001165 /* Enable all of the advertisement filters */
Manish Mandlikce818432021-09-21 14:47:10 -07001166 hci_req_prepare_adv_monitor_suspend(&req, false);
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001167
Daniel Winkler53274472020-09-15 14:14:27 -07001168 /* Unpause directed advertising */
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001169 hdev->advertising_paused = false;
1170 if (hdev->advertising_old_state) {
1171 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1172 hdev->suspend_tasks);
1173 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1174 queue_work(hdev->req_workqueue,
1175 &hdev->discoverable_update);
1176 hdev->advertising_old_state = 0;
1177 }
1178
Daniel Winkler53274472020-09-15 14:14:27 -07001179 /* Resume other advertisements */
1180 if (hdev->adv_instance_cnt)
Daniel Winkler2943d8e2020-11-06 15:20:19 -08001181 __hci_req_resume_adv_instances(&req);
Daniel Winkler53274472020-09-15 14:14:27 -07001182
Abhishek Pandit-Subedi4867bd02020-03-11 08:54:03 -07001183 /* Unpause discovery */
1184 hdev->discovery_paused = false;
1185 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1186 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1187 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1188 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1189 queue_work(hdev->req_workqueue, &hdev->discov_update);
1190 }
1191
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001192 hci_req_run(&req, suspend_req_complete);
1193 }
1194
1195 hdev->suspend_state = next;
Abhishek Pandit-Subedi9952d902020-03-11 08:54:00 -07001196
1197done:
1198 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1199 wake_up(&hdev->suspend_wait_q);
1200}
1201
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001202static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
Johan Hedbergf2252572015-11-18 12:49:20 +02001203{
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001204 return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001205}
1206
1207void __hci_req_disable_advertising(struct hci_request *req)
1208{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301209 if (ext_adv_capable(req->hdev)) {
Daniel Winkler37adf702020-07-14 14:16:00 -07001210 __hci_req_disable_ext_adv_instance(req, 0x00);
Johan Hedbergf2252572015-11-18 12:49:20 +02001211
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301212 } else {
1213 u8 enable = 0x00;
1214
1215 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1216 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001217}
1218
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001219static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1220{
1221 /* If privacy is not enabled don't use RPA */
1222 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1223 return false;
1224
1225 /* If basic privacy mode is enabled use RPA */
1226 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1227 return true;
1228
1229 /* If limited privacy mode is enabled don't use RPA if we're
1230 * both discoverable and bondable.
1231 */
1232 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1233 hci_dev_test_flag(hdev, HCI_BONDABLE))
1234 return false;
1235
1236 /* We're neither bondable nor discoverable in the limited
1237 * privacy mode, therefore use RPA.
1238 */
1239 return true;
1240}
1241
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001242static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1243{
1244 /* If there is no connection we are OK to advertise. */
1245 if (hci_conn_num(hdev, LE_LINK) == 0)
1246 return true;
1247
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001248 /* Check le_states if there is any connection in peripheral role. */
1249 if (hdev->conn_hash.le_num_peripheral > 0) {
1250 /* Peripheral connection state and non connectable mode bit 20.
1251 */
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001252 if (!connectable && !(hdev->le_states[2] & 0x10))
1253 return false;
1254
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001255 /* Peripheral connection state and connectable mode bit 38
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001256 * and scannable bit 21.
1257 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001258 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1259 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001260 return false;
1261 }
1262
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001263 /* Check le_states if there is any connection in central role. */
1264 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1265 /* Central connection state and non connectable mode bit 18. */
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001266 if (!connectable && !(hdev->le_states[2] & 0x02))
1267 return false;
1268
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001269 /* Central connection state and connectable mode bit 35 and
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001270 * scannable 19.
1271 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001272 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001273 !(hdev->le_states[2] & 0x08)))
1274 return false;
1275 }
1276
1277 return true;
1278}
1279
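/* Configure and enable legacy advertising for the current instance.
 * Callers use __hci_req_start_ext_adv() instead when the controller
 * supports extended advertising.
 */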
Johan Hedbergf2252572015-11-18 12:49:20 +02001280void __hci_req_enable_advertising(struct hci_request *req)
1281{
1282 struct hci_dev *hdev = req->hdev;
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001283 struct adv_info *adv;
Johan Hedbergf2252572015-11-18 12:49:20 +02001284 struct hci_cp_le_set_adv_param cp;
1285 u8 own_addr_type, enable = 0x01;
1286 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301287 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001288 u32 flags;
1289
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001290 flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1291 adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001292
1293 /* If the "connectable" instance flag was not set, then choose between
1294 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1295 */
1296 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1297 mgmt_get_connectable(hdev);
1298
1299 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001300 return;
1301
1302 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1303 __hci_req_disable_advertising(req);
1304
 1305 /* Clear the HCI_LE_ADV bit temporarily so that
 1306 * hci_update_random_address() knows that it's safe to go ahead
1307 * and write a new random address. The flag will be set back on
1308 * as soon as the SET_ADV_ENABLE HCI command completes.
1309 */
1310 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1311
Johan Hedbergf2252572015-11-18 12:49:20 +02001312 /* Set require_privacy to true only when non-connectable
1313 * advertising is used. In that case it is fine to use a
1314 * non-resolvable private address.
1315 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001316 if (hci_update_random_address(req, !connectable,
1317 adv_use_rpa(hdev, flags),
1318 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001319 return;
1320
1321 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001322
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001323 if (adv) {
1324 adv_min_interval = adv->min_interval;
1325 adv_max_interval = adv->max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001326 } else {
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301327 adv_min_interval = hdev->le_adv_min_interval;
1328 adv_max_interval = hdev->le_adv_max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001329 }
1330
1331 if (connectable) {
1332 cp.type = LE_ADV_IND;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301333 } else {
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001334 if (adv_cur_instance_is_scannable(hdev))
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301335 cp.type = LE_ADV_SCAN_IND;
1336 else
1337 cp.type = LE_ADV_NONCONN_IND;
1338
1339 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1340 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1341 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1342 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301343 }
1344 }
1345
1346 cp.min_interval = cpu_to_le16(adv_min_interval);
1347 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001348 cp.own_address_type = own_addr_type;
1349 cp.channel_map = hdev->le_adv_channel_map;
1350
1351 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1352
1353 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1354}
1355
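/* Update the scan response data for the given instance, using the
 * extended or legacy HCI command depending on controller support.
 * Nothing is queued if the data has not changed.
 */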
Johan Hedbergcab054a2015-11-30 11:21:45 +02001356void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001357{
1358 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001359 u8 len;
1360
1361 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1362 return;
1363
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301364 if (ext_adv_capable(hdev)) {
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001365 struct {
1366 struct hci_cp_le_set_ext_scan_rsp_data cp;
1367 u8 data[HCI_MAX_EXT_AD_LENGTH];
1368 } pdu;
Johan Hedbergf2252572015-11-18 12:49:20 +02001369
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001370 memset(&pdu, 0, sizeof(pdu));
Johan Hedbergf2252572015-11-18 12:49:20 +02001371
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001372 len = eir_create_scan_rsp(hdev, instance, pdu.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001373
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301374 if (hdev->scan_rsp_data_len == len &&
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001375 !memcmp(pdu.data, hdev->scan_rsp_data, len))
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301376 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001377
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001378 memcpy(hdev->scan_rsp_data, pdu.data, len);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301379 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001380
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001381 pdu.cp.handle = instance;
1382 pdu.cp.length = len;
1383 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1384 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301385
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001386 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1387 sizeof(pdu.cp) + len, &pdu.cp);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301388 } else {
1389 struct hci_cp_le_set_scan_rsp_data cp;
1390
1391 memset(&cp, 0, sizeof(cp));
1392
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001393 len = eir_create_scan_rsp(hdev, instance, cp.data);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301394
1395 if (hdev->scan_rsp_data_len == len &&
1396 !memcmp(cp.data, hdev->scan_rsp_data, len))
1397 return;
1398
1399 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1400 hdev->scan_rsp_data_len = len;
1401
1402 cp.length = len;
1403
1404 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1405 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001406}
1407
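/* Update the advertising data for the given instance; as with the scan
 * response data, the extended command is used when available and
 * unchanged data is not resent.
 */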
Johan Hedbergcab054a2015-11-30 11:21:45 +02001408void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001409{
1410 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001411 u8 len;
1412
1413 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1414 return;
1415
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301416 if (ext_adv_capable(hdev)) {
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001417 struct {
1418 struct hci_cp_le_set_ext_adv_data cp;
1419 u8 data[HCI_MAX_EXT_AD_LENGTH];
1420 } pdu;
Johan Hedbergf2252572015-11-18 12:49:20 +02001421
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001422 memset(&pdu, 0, sizeof(pdu));
Johan Hedbergf2252572015-11-18 12:49:20 +02001423
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001424 len = eir_create_adv_data(hdev, instance, pdu.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001425
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301426 /* There's nothing to do if the data hasn't changed */
1427 if (hdev->adv_data_len == len &&
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001428 memcmp(pdu.data, hdev->adv_data, len) == 0)
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301429 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001430
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001431 memcpy(hdev->adv_data, pdu.data, len);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301432 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001433
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001434 pdu.cp.length = len;
1435 pdu.cp.handle = instance;
1436 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1437 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301438
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001439 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1440 sizeof(pdu.cp) + len, &pdu.cp);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301441 } else {
1442 struct hci_cp_le_set_adv_data cp;
1443
1444 memset(&cp, 0, sizeof(cp));
1445
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001446 len = eir_create_adv_data(hdev, instance, cp.data);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301447
1448 /* There's nothing to do if the data hasn't changed */
1449 if (hdev->adv_data_len == len &&
1450 memcmp(cp.data, hdev->adv_data, len) == 0)
1451 return;
1452
1453 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1454 hdev->adv_data_len = len;
1455
1456 cp.length = len;
1457
1458 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1459 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001460}
1461
Johan Hedbergcab054a2015-11-30 11:21:45 +02001462int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001463{
1464 struct hci_request req;
1465
1466 hci_req_init(&req, hdev);
1467 __hci_req_update_adv_data(&req, instance);
1468
1469 return hci_req_run(&req, NULL);
1470}
1471
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301472static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1473 u16 opcode)
1474{
1475 BT_DBG("%s status %u", hdev->name, status);
1476}
1477
1478void hci_req_disable_address_resolution(struct hci_dev *hdev)
1479{
1480 struct hci_request req;
1481 __u8 enable = 0x00;
1482
1483 if (!use_ll_privacy(hdev) &&
1484 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1485 return;
1486
1487 hci_req_init(&req, hdev);
1488
1489 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1490
1491 hci_req_run(&req, enable_addr_resolution_complete);
1492}
1493
Johan Hedbergf2252572015-11-18 12:49:20 +02001494static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1495{
Howard Chung22fbcfc2020-11-11 15:02:19 +08001496 bt_dev_dbg(hdev, "status %u", status);
Johan Hedbergf2252572015-11-18 12:49:20 +02001497}
1498
1499void hci_req_reenable_advertising(struct hci_dev *hdev)
1500{
1501 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001502
1503 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001504 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001505 return;
1506
Johan Hedbergf2252572015-11-18 12:49:20 +02001507 hci_req_init(&req, hdev);
1508
Johan Hedbergcab054a2015-11-30 11:21:45 +02001509 if (hdev->cur_adv_instance) {
1510 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1511 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001512 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301513 if (ext_adv_capable(hdev)) {
1514 __hci_req_start_ext_adv(&req, 0x00);
1515 } else {
1516 __hci_req_update_adv_data(&req, 0x00);
1517 __hci_req_update_scan_rsp_data(&req, 0x00);
1518 __hci_req_enable_advertising(&req);
1519 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001520 }
1521
1522 hci_req_run(&req, adv_enable_complete);
1523}
1524
1525static void adv_timeout_expire(struct work_struct *work)
1526{
1527 struct hci_dev *hdev = container_of(work, struct hci_dev,
1528 adv_instance_expire.work);
1529
1530 struct hci_request req;
1531 u8 instance;
1532
Howard Chung22fbcfc2020-11-11 15:02:19 +08001533 bt_dev_dbg(hdev, "");
Johan Hedbergf2252572015-11-18 12:49:20 +02001534
1535 hci_dev_lock(hdev);
1536
1537 hdev->adv_instance_timeout = 0;
1538
Johan Hedbergcab054a2015-11-30 11:21:45 +02001539 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001540 if (instance == 0x00)
1541 goto unlock;
1542
1543 hci_req_init(&req, hdev);
1544
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001545 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001546
1547 if (list_empty(&hdev->adv_instances))
1548 __hci_req_disable_advertising(&req);
1549
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001550 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001551
1552unlock:
1553 hci_dev_unlock(hdev);
1554}
1555
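/* Restart passive scanning and advance the interleave state machine,
 * alternating between allowlist-filtered and unfiltered scanning.
 */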
Howard Chungc4f1f402020-11-26 12:22:21 +08001556static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1557 unsigned long opt)
1558{
1559 struct hci_dev *hdev = req->hdev;
1560 int ret = 0;
1561
1562 hci_dev_lock(hdev);
1563
1564 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1565 hci_req_add_le_scan_disable(req, false);
1566 hci_req_add_le_passive_scan(req);
1567
1568 switch (hdev->interleave_scan_state) {
1569 case INTERLEAVE_SCAN_ALLOWLIST:
 1570 bt_dev_dbg(hdev, "next state: no filter");
1571 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1572 break;
1573 case INTERLEAVE_SCAN_NO_FILTER:
 1574 bt_dev_dbg(hdev, "next state: allowlist");
1575 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1576 break;
1577 case INTERLEAVE_SCAN_NONE:
1578 BT_ERR("unexpected error");
1579 ret = -1;
1580 }
1581
1582 hci_dev_unlock(hdev);
1583
1584 return ret;
1585}
1586
1587static void interleave_scan_work(struct work_struct *work)
1588{
1589 struct hci_dev *hdev = container_of(work, struct hci_dev,
1590 interleave_scan.work);
1591 u8 status;
1592 unsigned long timeout;
1593
1594 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1595 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1596 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1597 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1598 } else {
1599 bt_dev_err(hdev, "unexpected error");
1600 return;
1601 }
1602
1603 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1604 HCI_CMD_TIMEOUT, &status);
1605
1606 /* Don't continue interleaving if it was canceled */
1607 if (is_interleave_scanning(hdev))
1608 queue_delayed_work(hdev->req_workqueue,
1609 &hdev->interleave_scan, timeout);
1610}
1611
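/* Select the own address type and, if needed, the random address for an
 * advertising instance: a resolvable private address when privacy is
 * enabled, a non-resolvable private address when privacy is required for
 * non-connectable advertising, and the public address otherwise.
 */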
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301612int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1613 bool use_rpa, struct adv_info *adv_instance,
1614 u8 *own_addr_type, bdaddr_t *rand_addr)
1615{
1616 int err;
1617
1618 bacpy(rand_addr, BDADDR_ANY);
1619
1620 /* If privacy is enabled use a resolvable private address. If
1621 * current RPA has expired then generate a new one.
1622 */
1623 if (use_rpa) {
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301624 /* If the controller supports LL Privacy, use own address
 1625 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
 1626 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05301627 if (use_ll_privacy(hdev) &&
1628 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301629 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1630 else
1631 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301632
1633 if (adv_instance) {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001634 if (adv_rpa_valid(adv_instance))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301635 return 0;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301636 } else {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001637 if (rpa_valid(hdev))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301638 return 0;
1639 }
1640
1641 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1642 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001643 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301644 return err;
1645 }
1646
1647 bacpy(rand_addr, &hdev->rpa);
1648
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301649 return 0;
1650 }
1651
1652 /* In case of required privacy without resolvable private address,
 1653 * use a non-resolvable private address. This is useful for
1654 * non-connectable advertising.
1655 */
1656 if (require_privacy) {
1657 bdaddr_t nrpa;
1658
1659 while (true) {
1660 /* The non-resolvable private address is generated
 1661 * from six random bytes with the two most significant
1662 * bits cleared.
1663 */
1664 get_random_bytes(&nrpa, 6);
1665 nrpa.b[5] &= 0x3f;
1666
1667 /* The non-resolvable private address shall not be
1668 * equal to the public address.
1669 */
1670 if (bacmp(&hdev->bdaddr, &nrpa))
1671 break;
1672 }
1673
1674 *own_addr_type = ADDR_LE_DEV_RANDOM;
1675 bacpy(rand_addr, &nrpa);
1676
1677 return 0;
1678 }
1679
1680 /* No privacy so use a public address. */
1681 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1682
1683 return 0;
1684}
1685
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301686void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1687{
1688 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1689}
1690
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001691static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1692{
1693 struct hci_dev *hdev = req->hdev;
1694
1695 /* If we're advertising or initiating an LE connection we can't
1696 * go ahead and change the random address at this time. This is
1697 * because the eventual initiator address used for the
1698 * subsequently created connection will be undefined (some
1699 * controllers use the new address and others the one we had
1700 * when the operation started).
1701 *
1702 * In this kind of scenario skip the update and let the random
1703 * address be updated at the next cycle.
1704 */
1705 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1706 hci_lookup_le_connect(hdev)) {
1707 bt_dev_dbg(hdev, "Deferring random address update");
1708 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1709 return;
1710 }
1711
1712 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1713}
1714
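/* Build the LE Set Extended Advertising Parameters command for an
 * instance (event properties, intervals, PHYs, own address type) and,
 * when a random address is needed, queue the Set Advertising Set Random
 * Address command as well.
 */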
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301715int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301716{
1717 struct hci_cp_le_set_ext_adv_params cp;
1718 struct hci_dev *hdev = req->hdev;
1719 bool connectable;
1720 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301721 bdaddr_t random_addr;
1722 u8 own_addr_type;
1723 int err;
1724 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301725 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301726
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301727 if (instance > 0) {
1728 adv_instance = hci_find_adv_instance(hdev, instance);
1729 if (!adv_instance)
1730 return -EINVAL;
1731 } else {
1732 adv_instance = NULL;
1733 }
1734
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001735 flags = hci_adv_instance_flags(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301736
1737 /* If the "connectable" instance flag was not set, then choose between
1738 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1739 */
1740 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1741 mgmt_get_connectable(hdev);
1742
Colin Ian King75edd1f2018-11-09 13:27:36 +00001743 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301744 return -EPERM;
1745
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301746 /* Set require_privacy to true only when non-connectable
1747 * advertising is used. In that case it is fine to use a
1748 * non-resolvable private address.
1749 */
1750 err = hci_get_random_address(hdev, !connectable,
1751 adv_use_rpa(hdev, flags), adv_instance,
1752 &own_addr_type, &random_addr);
1753 if (err < 0)
1754 return err;
1755
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301756 memset(&cp, 0, sizeof(cp));
1757
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001758 if (adv_instance) {
1759 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1760 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1761 cp.tx_power = adv_instance->tx_power;
1762 } else {
1763 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1764 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1765 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1766 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301767
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301768 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1769
1770 if (connectable) {
1771 if (secondary_adv)
1772 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1773 else
1774 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001775 } else if (hci_adv_instance_is_scannable(hdev, instance) ||
Daniel Winklerff02db12021-03-03 11:15:23 -08001776 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301777 if (secondary_adv)
1778 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1779 else
1780 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1781 } else {
1782 if (secondary_adv)
1783 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1784 else
1785 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1786 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301787
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301788 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301789 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001790 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301791
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301792 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1793 cp.primary_phy = HCI_ADV_PHY_1M;
1794 cp.secondary_phy = HCI_ADV_PHY_2M;
1795 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1796 cp.primary_phy = HCI_ADV_PHY_CODED;
1797 cp.secondary_phy = HCI_ADV_PHY_CODED;
1798 } else {
1799 /* In all other cases use 1M */
1800 cp.primary_phy = HCI_ADV_PHY_1M;
1801 cp.secondary_phy = HCI_ADV_PHY_1M;
1802 }
1803
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301804 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1805
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301806 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1807 bacmp(&random_addr, BDADDR_ANY)) {
1808 struct hci_cp_le_set_adv_set_rand_addr cp;
1809
1810 /* Check if random address need to be updated */
1811 if (adv_instance) {
1812 if (!bacmp(&random_addr, &adv_instance->random_addr))
1813 return 0;
1814 } else {
1815 if (!bacmp(&random_addr, &hdev->random_addr))
1816 return 0;
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001817 /* Instance 0x00 doesn't have an adv_info; instead it
 1818 * uses hdev->random_addr to track its address, so
 1819 * whenever the address needs to be updated this also sets
 1820 * the random address, since hdev->random_addr is shared
 1821 * with the scan state machine.
 1822 */
1823 set_random_addr(req, &random_addr);
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301824 }
1825
1826 memset(&cp, 0, sizeof(cp));
1827
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001828 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301829 bacpy(&cp.bdaddr, &random_addr);
1830
1831 hci_req_add(req,
1832 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1833 sizeof(cp), &cp);
1834 }
1835
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301836 return 0;
1837}
1838
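/* Enable a single extended advertising set; the duration is handed to
 * the controller, which is responsible for scheduling the instance.
 */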
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001839int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301840{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001841 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301842 struct hci_cp_le_set_ext_adv_enable *cp;
1843 struct hci_cp_ext_adv_set *adv_set;
1844 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001845 struct adv_info *adv_instance;
1846
1847 if (instance > 0) {
1848 adv_instance = hci_find_adv_instance(hdev, instance);
1849 if (!adv_instance)
1850 return -EINVAL;
1851 } else {
1852 adv_instance = NULL;
1853 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301854
1855 cp = (void *) data;
1856 adv_set = (void *) cp->data;
1857
1858 memset(cp, 0, sizeof(*cp));
1859
1860 cp->enable = 0x01;
1861 cp->num_of_sets = 0x01;
1862
1863 memset(adv_set, 0, sizeof(*adv_set));
1864
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001865 adv_set->handle = instance;
1866
1867 /* Set duration per instance since controller is responsible for
1868 * scheduling it.
1869 */
1870 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001871 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001872
1873 /* Time = N * 10 ms */
1874 adv_set->duration = cpu_to_le16(duration / 10);
1875 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301876
1877 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1878 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1879 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001880
1881 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301882}
1883
Daniel Winkler37adf702020-07-14 14:16:00 -07001884int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1885{
1886 struct hci_dev *hdev = req->hdev;
1887 struct hci_cp_le_set_ext_adv_enable *cp;
1888 struct hci_cp_ext_adv_set *adv_set;
1889 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1890 u8 req_size;
1891
1892 /* If request specifies an instance that doesn't exist, fail */
1893 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1894 return -EINVAL;
1895
1896 memset(data, 0, sizeof(data));
1897
1898 cp = (void *)data;
1899 adv_set = (void *)cp->data;
1900
1901 /* Instance 0x00 indicates all advertising instances will be disabled */
1902 cp->num_of_sets = !!instance;
1903 cp->enable = 0x00;
1904
1905 adv_set->handle = instance;
1906
1907 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1908 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1909
1910 return 0;
1911}
1912
1913int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1914{
1915 struct hci_dev *hdev = req->hdev;
1916
1917 /* If request specifies an instance that doesn't exist, fail */
1918 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1919 return -EINVAL;
1920
1921 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1922
1923 return 0;
1924}
1925
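/* Start (or restart) extended advertising for an instance: disable the
 * set if the controller already knows about it, reprogram its
 * parameters, update the scan response data and enable it again.
 */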
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301926int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1927{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301928 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07001929 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301930 int err;
1931
Daniel Winkler37adf702020-07-14 14:16:00 -07001932 /* If instance isn't pending, the chip knows about it, and it's safe to
1933 * disable
1934 */
1935 if (adv_instance && !adv_instance->pending)
1936 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301937
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301938 err = __hci_req_setup_ext_adv_instance(req, instance);
1939 if (err < 0)
1940 return err;
1941
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301942 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001943 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301944
1945 return 0;
1946}
1947
Johan Hedbergf2252572015-11-18 12:49:20 +02001948int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1949 bool force)
1950{
1951 struct hci_dev *hdev = req->hdev;
1952 struct adv_info *adv_instance = NULL;
1953 u16 timeout;
1954
1955 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001956 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001957 return -EPERM;
1958
1959 if (hdev->adv_instance_timeout)
1960 return -EBUSY;
1961
1962 adv_instance = hci_find_adv_instance(hdev, instance);
1963 if (!adv_instance)
1964 return -ENOENT;
1965
1966 /* A zero timeout means unlimited advertising. As long as there is
1967 * only one instance, duration should be ignored. We still set a timeout
1968 * in case further instances are being added later on.
1969 *
1970 * If the remaining lifetime of the instance is more than the duration
1971 * then the timeout corresponds to the duration, otherwise it will be
1972 * reduced to the remaining instance lifetime.
1973 */
1974 if (adv_instance->timeout == 0 ||
1975 adv_instance->duration <= adv_instance->remaining_time)
1976 timeout = adv_instance->duration;
1977 else
1978 timeout = adv_instance->remaining_time;
1979
1980 /* The remaining time is being reduced unless the instance is being
1981 * advertised without time limit.
1982 */
1983 if (adv_instance->timeout)
1984 adv_instance->remaining_time =
1985 adv_instance->remaining_time - timeout;
1986
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001987 /* Only use work for scheduling instances with legacy advertising */
1988 if (!ext_adv_capable(hdev)) {
1989 hdev->adv_instance_timeout = timeout;
1990 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02001991 &hdev->adv_instance_expire,
1992 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001993 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001994
1995 /* If we're just re-scheduling the same instance again then do not
1996 * execute any HCI commands. This happens when a single instance is
1997 * being advertised.
1998 */
1999 if (!force && hdev->cur_adv_instance == instance &&
2000 hci_dev_test_flag(hdev, HCI_LE_ADV))
2001 return 0;
2002
2003 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302004 if (ext_adv_capable(hdev)) {
2005 __hci_req_start_ext_adv(req, instance);
2006 } else {
2007 __hci_req_update_adv_data(req, instance);
2008 __hci_req_update_scan_rsp_data(req, instance);
2009 __hci_req_enable_advertising(req);
2010 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002011
2012 return 0;
2013}
2014
Johan Hedbergf2252572015-11-18 12:49:20 +02002015/* For a single instance:
2016 * - force == true: The instance will be removed even when its remaining
2017 * lifetime is not zero.
2018 * - force == false: the instance will be deactivated but kept stored unless
2019 * the remaining lifetime is zero.
2020 *
2021 * For instance == 0x00:
2022 * - force == true: All instances will be removed regardless of their timeout
2023 * setting.
2024 * - force == false: Only instances that have a timeout will be removed.
2025 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002026void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2027 struct hci_request *req, u8 instance,
2028 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002029{
2030 struct adv_info *adv_instance, *n, *next_instance = NULL;
2031 int err;
2032 u8 rem_inst;
2033
2034 /* Cancel any timeout concerning the removed instance(s). */
2035 if (!instance || hdev->cur_adv_instance == instance)
2036 cancel_adv_timeout(hdev);
2037
2038 /* Get the next instance to advertise BEFORE we remove
2039 * the current one. This can be the same instance again
2040 * if there is only one instance.
2041 */
2042 if (instance && hdev->cur_adv_instance == instance)
2043 next_instance = hci_get_next_instance(hdev, instance);
2044
2045 if (instance == 0x00) {
2046 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2047 list) {
2048 if (!(force || adv_instance->timeout))
2049 continue;
2050
2051 rem_inst = adv_instance->instance;
2052 err = hci_remove_adv_instance(hdev, rem_inst);
2053 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002054 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002055 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002056 } else {
2057 adv_instance = hci_find_adv_instance(hdev, instance);
2058
2059 if (force || (adv_instance && adv_instance->timeout &&
2060 !adv_instance->remaining_time)) {
2061 /* Don't advertise a removed instance. */
2062 if (next_instance &&
2063 next_instance->instance == instance)
2064 next_instance = NULL;
2065
2066 err = hci_remove_adv_instance(hdev, instance);
2067 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002068 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002069 }
2070 }
2071
Johan Hedbergf2252572015-11-18 12:49:20 +02002072 if (!req || !hdev_is_powered(hdev) ||
2073 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2074 return;
2075
Daniel Winkler37adf702020-07-14 14:16:00 -07002076 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002077 __hci_req_schedule_adv_instance(req, next_instance->instance,
2078 false);
2079}
2080
Johan Hedberg0857dd32014-12-19 13:40:20 +02002081int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002082 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002083{
2084 struct hci_dev *hdev = req->hdev;
2085 int err;
2086
2087 /* If privacy is enabled use a resolvable private address. If
2088 * current RPA has expired or there is something else than
2089 * the current RPA in use, then generate a new one.
2090 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002091 if (use_rpa) {
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302092 /* If the controller supports LL Privacy, use own address
 2093 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
 2094 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05302095 if (use_ll_privacy(hdev) &&
2096 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302097 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2098 else
2099 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002100
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07002101 if (rpa_valid(hdev))
Johan Hedberg0857dd32014-12-19 13:40:20 +02002102 return 0;
2103
2104 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2105 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002106 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002107 return err;
2108 }
2109
2110 set_random_addr(req, &hdev->rpa);
2111
Johan Hedberg0857dd32014-12-19 13:40:20 +02002112 return 0;
2113 }
2114
2115 /* In case of required privacy without resolvable private address,
 2116 * use a non-resolvable private address. This is useful for active
2117 * scanning and non-connectable advertising.
2118 */
2119 if (require_privacy) {
2120 bdaddr_t nrpa;
2121
2122 while (true) {
2123 /* The non-resolvable private address is generated
 2124 * from six random bytes with the two most significant
2125 * bits cleared.
2126 */
2127 get_random_bytes(&nrpa, 6);
2128 nrpa.b[5] &= 0x3f;
2129
2130 /* The non-resolvable private address shall not be
2131 * equal to the public address.
2132 */
2133 if (bacmp(&hdev->bdaddr, &nrpa))
2134 break;
2135 }
2136
2137 *own_addr_type = ADDR_LE_DEV_RANDOM;
2138 set_random_addr(req, &nrpa);
2139 return 0;
2140 }
2141
2142 /* If forcing static address is in use or there is no public
2143 * address use the static address as random address (but skip
2144 * the HCI command if the current random address is already the
 2145 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002146 *
2147 * In case BR/EDR has been disabled on a dual-mode controller
2148 * and a static address has been configured, then use that
2149 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002150 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002151 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002152 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002153 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002154 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002155 *own_addr_type = ADDR_LE_DEV_RANDOM;
2156 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2157 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2158 &hdev->static_addr);
2159 return 0;
2160 }
2161
2162 /* Neither privacy nor static address is being used so use a
2163 * public address.
2164 */
2165 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2166
2167 return 0;
2168}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002169
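/* Return true if any device on the BR/EDR accept list has no fully
 * established connection, in which case page scan must stay enabled so
 * that the device can reconnect.
 */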
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002170static bool disconnected_accept_list_entries(struct hci_dev *hdev)
Johan Hedberg405a2612014-12-19 23:18:22 +02002171{
2172 struct bdaddr_list *b;
2173
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002174 list_for_each_entry(b, &hdev->accept_list, list) {
Johan Hedberg405a2612014-12-19 23:18:22 +02002175 struct hci_conn *conn;
2176
2177 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2178 if (!conn)
2179 return true;
2180
2181 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2182 return true;
2183 }
2184
2185 return false;
2186}
2187
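/* Recompute the BR/EDR scan enable value (page scan and inquiry scan)
 * from the connectable and discoverable settings and the accept list,
 * and only queue the command when the value actually changes.
 */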
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002188void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002189{
2190 struct hci_dev *hdev = req->hdev;
2191 u8 scan;
2192
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002193 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002194 return;
2195
2196 if (!hdev_is_powered(hdev))
2197 return;
2198
2199 if (mgmt_powering_down(hdev))
2200 return;
2201
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002202 if (hdev->scanning_paused)
2203 return;
2204
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002205 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002206 disconnected_accept_list_entries(hdev))
Johan Hedberg405a2612014-12-19 23:18:22 +02002207 scan = SCAN_PAGE;
2208 else
2209 scan = SCAN_DISABLED;
2210
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002211 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002212 scan |= SCAN_INQUIRY;
2213
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002214 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2215 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2216 return;
2217
Johan Hedberg405a2612014-12-19 23:18:22 +02002218 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2219}
2220
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002221static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002222{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002223 hci_dev_lock(req->hdev);
2224 __hci_req_update_scan(req);
2225 hci_dev_unlock(req->hdev);
2226 return 0;
2227}
Johan Hedberg405a2612014-12-19 23:18:22 +02002228
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002229static void scan_update_work(struct work_struct *work)
2230{
2231 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2232
2233 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002234}
2235
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002236static int connectable_update(struct hci_request *req, unsigned long opt)
2237{
2238 struct hci_dev *hdev = req->hdev;
2239
2240 hci_dev_lock(hdev);
2241
2242 __hci_req_update_scan(req);
2243
2244 /* If BR/EDR is not enabled and we disable advertising as a
2245 * by-product of disabling connectable, we need to update the
2246 * advertising flags.
2247 */
2248 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002249 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002250
2251 /* Update the advertising parameters if necessary */
2252 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302253 !list_empty(&hdev->adv_instances)) {
2254 if (ext_adv_capable(hdev))
2255 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2256 else
2257 __hci_req_enable_advertising(req);
2258 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002259
2260 __hci_update_background_scan(req);
2261
2262 hci_dev_unlock(hdev);
2263
2264 return 0;
2265}
2266
2267static void connectable_update_work(struct work_struct *work)
2268{
2269 struct hci_dev *hdev = container_of(work, struct hci_dev,
2270 connectable_update);
2271 u8 status;
2272
2273 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2274 mgmt_set_connectable_complete(hdev, status);
2275}
2276
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002277static u8 get_service_classes(struct hci_dev *hdev)
2278{
2279 struct bt_uuid *uuid;
2280 u8 val = 0;
2281
2282 list_for_each_entry(uuid, &hdev->uuids, list)
2283 val |= uuid->svc_hint;
2284
2285 return val;
2286}
2287
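/* Rebuild the Class of Device from the major/minor class and the
 * service-class hints of the registered UUIDs, adding the limited
 * discoverable bit when needed; only sent if the value changed.
 */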
2288void __hci_req_update_class(struct hci_request *req)
2289{
2290 struct hci_dev *hdev = req->hdev;
2291 u8 cod[3];
2292
Howard Chung22fbcfc2020-11-11 15:02:19 +08002293 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002294
2295 if (!hdev_is_powered(hdev))
2296 return;
2297
2298 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2299 return;
2300
2301 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2302 return;
2303
2304 cod[0] = hdev->minor_class;
2305 cod[1] = hdev->major_class;
2306 cod[2] = get_service_classes(hdev);
2307
2308 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2309 cod[1] |= 0x20;
2310
2311 if (memcmp(cod, hdev->dev_class, 3) == 0)
2312 return;
2313
2314 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2315}
2316
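/* Write the current Inquiry Access Codes: LIAC plus GIAC (0x9e8b00 and
 * 0x9e8b33) in limited discoverable mode, GIAC only otherwise.
 */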
Johan Hedbergaed1a882015-11-22 17:24:44 +03002317static void write_iac(struct hci_request *req)
2318{
2319 struct hci_dev *hdev = req->hdev;
2320 struct hci_cp_write_current_iac_lap cp;
2321
2322 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2323 return;
2324
2325 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2326 /* Limited discoverable mode */
2327 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2328 cp.iac_lap[0] = 0x00; /* LIAC */
2329 cp.iac_lap[1] = 0x8b;
2330 cp.iac_lap[2] = 0x9e;
2331 cp.iac_lap[3] = 0x33; /* GIAC */
2332 cp.iac_lap[4] = 0x8b;
2333 cp.iac_lap[5] = 0x9e;
2334 } else {
2335 /* General discoverable mode */
2336 cp.num_iac = 1;
2337 cp.iac_lap[0] = 0x33; /* GIAC */
2338 cp.iac_lap[1] = 0x8b;
2339 cp.iac_lap[2] = 0x9e;
2340 }
2341
2342 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2343 (cp.num_iac * 3) + 1, &cp);
2344}
2345
2346static int discoverable_update(struct hci_request *req, unsigned long opt)
2347{
2348 struct hci_dev *hdev = req->hdev;
2349
2350 hci_dev_lock(hdev);
2351
2352 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2353 write_iac(req);
2354 __hci_req_update_scan(req);
2355 __hci_req_update_class(req);
2356 }
2357
2358 /* Advertising instances don't use the global discoverable setting, so
2359 * only update AD if advertising was enabled using Set Advertising.
2360 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002361 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002362 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002363
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002364 /* Discoverable mode affects the local advertising
2365 * address in limited privacy mode.
2366 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302367 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2368 if (ext_adv_capable(hdev))
2369 __hci_req_start_ext_adv(req, 0x00);
2370 else
2371 __hci_req_enable_advertising(req);
2372 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002373 }
2374
Johan Hedbergaed1a882015-11-22 17:24:44 +03002375 hci_dev_unlock(hdev);
2376
2377 return 0;
2378}
2379
2380static void discoverable_update_work(struct work_struct *work)
2381{
2382 struct hci_dev *hdev = container_of(work, struct hci_dev,
2383 discoverable_update);
2384 u8 status;
2385
2386 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2387 mgmt_set_discoverable_complete(hdev, status);
2388}
2389
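/* Queue the HCI command needed to abort a connection based on its
 * current state: disconnect established links, cancel pending
 * connection attempts, or reject incoming connection requests.
 */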
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002390void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2391 u8 reason)
2392{
2393 switch (conn->state) {
2394 case BT_CONNECTED:
2395 case BT_CONFIG:
2396 if (conn->type == AMP_LINK) {
2397 struct hci_cp_disconn_phy_link cp;
2398
2399 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2400 cp.reason = reason;
2401 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2402 &cp);
2403 } else {
2404 struct hci_cp_disconnect dc;
2405
2406 dc.handle = cpu_to_le16(conn->handle);
2407 dc.reason = reason;
2408 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2409 }
2410
2411 conn->state = BT_DISCONN;
2412
2413 break;
2414 case BT_CONNECT:
2415 if (conn->type == LE_LINK) {
2416 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2417 break;
2418 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2419 0, NULL);
2420 } else if (conn->type == ACL_LINK) {
2421 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2422 break;
2423 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2424 6, &conn->dst);
2425 }
2426 break;
2427 case BT_CONNECT2:
2428 if (conn->type == ACL_LINK) {
2429 struct hci_cp_reject_conn_req rej;
2430
2431 bacpy(&rej.bdaddr, &conn->dst);
2432 rej.reason = reason;
2433
2434 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2435 sizeof(rej), &rej);
2436 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2437 struct hci_cp_reject_sync_conn_req rej;
2438
2439 bacpy(&rej.bdaddr, &conn->dst);
2440
2441 /* SCO rejection has its own limited set of
2442 * allowed error values (0x0D-0x0F) which isn't
2443 * compatible with most values passed to this
2444 * function. To be safe hard-code one of the
2445 * values that's suitable for SCO.
2446 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002447 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002448
2449 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2450 sizeof(rej), &rej);
2451 }
2452 break;
2453 default:
2454 conn->state = BT_CLOSED;
2455 break;
2456 }
2457}
2458
2459static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2460{
2461 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002462 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002463}
2464
2465int hci_abort_conn(struct hci_conn *conn, u8 reason)
2466{
2467 struct hci_request req;
2468 int err;
2469
2470 hci_req_init(&req, conn->hdev);
2471
2472 __hci_abort_conn(&req, conn, reason);
2473
2474 err = hci_req_run(&req, abort_conn_complete);
2475 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002476 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002477 return err;
2478 }
2479
2480 return 0;
2481}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002482
Johan Hedberga1d01db2015-11-11 08:11:25 +02002483static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002484{
2485 hci_dev_lock(req->hdev);
2486 __hci_update_background_scan(req);
2487 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002488 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002489}
2490
2491static void bg_scan_update(struct work_struct *work)
2492{
2493 struct hci_dev *hdev = container_of(work, struct hci_dev,
2494 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002495 struct hci_conn *conn;
2496 u8 status;
2497 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002498
Johan Hedberg84235d22015-11-11 08:11:20 +02002499 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2500 if (!err)
2501 return;
2502
2503 hci_dev_lock(hdev);
2504
2505 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2506 if (conn)
2507 hci_le_conn_failed(conn, status);
2508
2509 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002510}
2511
Johan Hedberga1d01db2015-11-11 08:11:25 +02002512static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002513{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302514 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002515 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002516}
2517
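/* Start a BR/EDR inquiry of the requested length, flushing the inquiry
 * cache first and using the limited or general IAC depending on the
 * discovery setting.
 */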
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002518static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2519{
2520 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002521 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2522 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002523 struct hci_cp_inquiry cp;
2524
Archie Pusaka06752d12021-04-01 11:11:33 +08002525 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2526 return 0;
2527
Howard Chung22fbcfc2020-11-11 15:02:19 +08002528 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002529
2530 hci_dev_lock(req->hdev);
2531 hci_inquiry_cache_flush(req->hdev);
2532 hci_dev_unlock(req->hdev);
2533
2534 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002535
2536 if (req->hdev->discovery.limited)
2537 memcpy(&cp.lap, liac, sizeof(cp.lap));
2538 else
2539 memcpy(&cp.lap, giac, sizeof(cp.lap));
2540
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002541 cp.length = length;
2542
2543 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2544
2545 return 0;
2546}
2547
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002548static void le_scan_disable_work(struct work_struct *work)
2549{
2550 struct hci_dev *hdev = container_of(work, struct hci_dev,
2551 le_scan_disable.work);
2552 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002553
Howard Chung22fbcfc2020-11-11 15:02:19 +08002554 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002555
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002556 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002557 return;
2558
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002559 cancel_delayed_work(&hdev->le_scan_restart);
2560
2561 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2562 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002563 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2564 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002565 return;
2566 }
2567
2568 hdev->discovery.scan_start = 0;
2569
 2570 /* If we were running an LE-only scan, change discovery state. If
2571 * we were running both LE and BR/EDR inquiry simultaneously,
2572 * and BR/EDR inquiry is already finished, stop discovery,
2573 * otherwise BR/EDR inquiry will stop discovery when finished.
 2574 * If we are going to resolve a remote device name, do not change
2575 * discovery state.
2576 */
2577
2578 if (hdev->discovery.type == DISCOV_TYPE_LE)
2579 goto discov_stopped;
2580
2581 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2582 return;
2583
2584 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2585 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2586 hdev->discovery.state != DISCOVERY_RESOLVING)
2587 goto discov_stopped;
2588
2589 return;
2590 }
2591
2592 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2593 HCI_CMD_TIMEOUT, &status);
2594 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002595 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002596 goto discov_stopped;
2597 }
2598
2599 return;
2600
2601discov_stopped:
2602 hci_dev_lock(hdev);
2603 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2604 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002605}
2606
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002607static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002608{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002609 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002610
2611 /* If controller is not scanning we are done. */
2612 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2613 return 0;
2614
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002615 if (hdev->scanning_paused) {
2616 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2617 return 0;
2618 }
2619
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302620 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002621
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302622 if (use_ext_scan(hdev)) {
2623 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2624
2625 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2626 ext_enable_cp.enable = LE_SCAN_ENABLE;
2627 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2628
2629 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2630 sizeof(ext_enable_cp), &ext_enable_cp);
2631 } else {
2632 struct hci_cp_le_set_scan_enable cp;
2633
2634 memset(&cp, 0, sizeof(cp));
2635 cp.enable = LE_SCAN_ENABLE;
2636 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2637 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2638 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002639
2640 return 0;
2641}
2642
2643static void le_scan_restart_work(struct work_struct *work)
2644{
2645 struct hci_dev *hdev = container_of(work, struct hci_dev,
2646 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002647 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002648 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002649
Howard Chung22fbcfc2020-11-11 15:02:19 +08002650 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002651
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002652 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002653 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002654 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2655 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002656 return;
2657 }
2658
2659 hci_dev_lock(hdev);
2660
2661 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2662 !hdev->discovery.scan_start)
2663 goto unlock;
2664
 2665 /* When the scan was started, the le_scan_disable work was queued
 2666 * to run 'duration' after scan_start. During the scan restart this
 2667 * work has been canceled, so queue it again with the remaining
 2668 * timeout to make sure the scan does not run indefinitely.
2669 */
2670 duration = hdev->discovery.scan_duration;
2671 scan_start = hdev->discovery.scan_start;
2672 now = jiffies;
2673 if (now - scan_start <= duration) {
2674 int elapsed;
2675
2676 if (now >= scan_start)
2677 elapsed = now - scan_start;
2678 else
2679 elapsed = ULONG_MAX - scan_start + now;
2680
2681 timeout = duration - elapsed;
2682 } else {
2683 timeout = 0;
2684 }
2685
2686 queue_delayed_work(hdev->req_workqueue,
2687 &hdev->le_scan_disable, timeout);
2688
2689unlock:
2690 hci_dev_unlock(hdev);
2691}
2692
Johan Hedberge68f0722015-11-11 08:30:30 +02002693static int active_scan(struct hci_request *req, unsigned long opt)
2694{
2695 uint16_t interval = opt;
2696 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002697 u8 own_addr_type;
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002698 /* Accept list is not used for discovery */
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002699 u8 filter_policy = 0x00;
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002700 /* Default is to enable the duplicate filter */
2701 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302702 /* Discovery doesn't require controller address resolution */
2703 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02002704 int err;
2705
Howard Chung22fbcfc2020-11-11 15:02:19 +08002706 bt_dev_dbg(hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002707
Johan Hedberge68f0722015-11-11 08:30:30 +02002708 /* If controller is scanning, it means the background scanning is
2709 * running. Thus, we should temporarily stop it in order to set the
2710 * discovery scanning parameters.
2711 */
Howard Chung422bb172020-11-26 12:22:23 +08002712 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302713 hci_req_add_le_scan_disable(req, false);
Howard Chung422bb172020-11-26 12:22:23 +08002714 cancel_interleave_scan(hdev);
2715 }
Johan Hedberge68f0722015-11-11 08:30:30 +02002716
2717 /* All active scans will be done with either a resolvable private
2718 * address (when privacy feature has been enabled) or non-resolvable
2719 * private address.
2720 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002721 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2722 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002723 if (err < 0)
2724 own_addr_type = ADDR_LE_DEV_PUBLIC;
2725
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002726 if (hci_is_adv_monitoring(hdev)) {
2727		/* The duplicate filter should be disabled while an advertisement
2728		 * monitor is active; otherwise AdvMon can only receive one
2729		 * advertisement per peer during active scanning and might
2730		 * report loss for those peers.
2731		 *
2732		 * Note that different controllers have different meanings of
2733		 * |duplicate|. Some of them consider packets with the same
2734		 * address as duplicates, while others consider packets with the
2735		 * same address and the same RSSI as duplicates. In the latter
2736		 * case disabling the duplicate filter is not strictly needed,
2737		 * but since active scanning typically runs only for a short
2738		 * period of time, the extra power impact is negligible.
2739		 */
2740 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2741 }
2742
Alain Michaudd4edda02020-06-29 17:04:15 +00002743 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2744 hdev->le_scan_window_discovery, own_addr_type,
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002745 filter_policy, filter_dup, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02002746 return 0;
2747}
2748
2749static int interleaved_discov(struct hci_request *req, unsigned long opt)
2750{
2751 int err;
2752
Howard Chung22fbcfc2020-11-11 15:02:19 +08002753 bt_dev_dbg(req->hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002754
2755 err = active_scan(req, opt);
2756 if (err)
2757 return err;
2758
Johan Hedberg7df26b52015-11-11 12:24:21 +02002759 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002760}
2761
2762static void start_discovery(struct hci_dev *hdev, u8 *status)
2763{
2764 unsigned long timeout;
2765
Howard Chung22fbcfc2020-11-11 15:02:19 +08002766 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002767
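	/* Summary of the cases below: BR/EDR-only discovery runs a plain
	 * inquiry; LE-only and interleaved discovery start an active LE scan
	 * and later queue le_scan_disable with the timeout chosen here,
	 * either DISCOV_LE_TIMEOUT or, when BR/EDR inquiry and LE scan have
	 * to run one after the other, hdev->discov_interleaved_timeout.
	 */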
2768 switch (hdev->discovery.type) {
2769 case DISCOV_TYPE_BREDR:
2770 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002771 hci_req_sync(hdev, bredr_inquiry,
2772 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002773 status);
2774 return;
2775 case DISCOV_TYPE_INTERLEAVED:
2776 /* When running simultaneous discovery, the LE scanning time
2777	 * should occupy the whole discovery time since BR/EDR inquiry
2778 * and LE scanning are scheduled by the controller.
2779 *
2780 * For interleaving discovery in comparison, BR/EDR inquiry
2781 * and LE scanning are done sequentially with separate
2782 * timeouts.
2783 */
2784 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2785 &hdev->quirks)) {
2786 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2787			/* During simultaneous discovery, we double the LE scan
2788			 * interval to leave some time for the controller to do
2789			 * BR/EDR inquiry as well.
2790 */
2791 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00002792 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002793 status);
2794 break;
2795 }
2796
2797 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00002798 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002799 HCI_CMD_TIMEOUT, status);
2800 break;
2801 case DISCOV_TYPE_LE:
2802 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00002803 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002804 HCI_CMD_TIMEOUT, status);
2805 break;
2806 default:
2807 *status = HCI_ERROR_UNSPECIFIED;
2808 return;
2809 }
2810
2811 if (*status)
2812 return;
2813
Howard Chung22fbcfc2020-11-11 15:02:19 +08002814 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
Johan Hedberge68f0722015-11-11 08:30:30 +02002815
2816 /* When service discovery is used and the controller has a
2817 * strict duplicate filter, it is important to remember the
2818 * start and duration of the scan. This is required for
2819 * restarting scanning during the discovery phase.
2820 */
2821 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2822 hdev->discovery.result_filtering) {
2823 hdev->discovery.scan_start = jiffies;
2824 hdev->discovery.scan_duration = timeout;
2825 }
2826
2827 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2828 timeout);
2829}
2830
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002831bool hci_req_stop_discovery(struct hci_request *req)
2832{
2833 struct hci_dev *hdev = req->hdev;
2834 struct discovery_state *d = &hdev->discovery;
2835 struct hci_cp_remote_name_req_cancel cp;
2836 struct inquiry_entry *e;
2837 bool ret = false;
2838
Howard Chung22fbcfc2020-11-11 15:02:19 +08002839 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002840
2841 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2842 if (test_bit(HCI_INQUIRY, &hdev->flags))
2843 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2844
2845 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2846 cancel_delayed_work(&hdev->le_scan_disable);
Sonny Sasakac06632a2021-03-15 10:30:59 -07002847 cancel_delayed_work(&hdev->le_scan_restart);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302848 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002849 }
2850
2851 ret = true;
2852 } else {
2853 /* Passive scanning */
2854 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302855 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002856 ret = true;
2857 }
2858 }
2859
2860 /* No further actions needed for LE-only discovery */
2861 if (d->type == DISCOV_TYPE_LE)
2862 return ret;
2863
2864 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2865 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2866 NAME_PENDING);
2867 if (!e)
2868 return ret;
2869
2870 bacpy(&cp.bdaddr, &e->data.bdaddr);
2871 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2872 &cp);
2873 ret = true;
2874 }
2875
2876 return ret;
2877}
2878
Kiran K9798fbd2021-09-07 15:42:44 +05302879static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2880 u16 opcode)
2881{
2882 bt_dev_dbg(hdev, "status %u", status);
2883}
2884
2885int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2886{
2887 struct hci_request req;
2888 int err;
2889 __u8 vnd_len, *vnd_data = NULL;
2890 struct hci_op_configure_data_path *cmd = NULL;
2891
2892 hci_req_init(&req, hdev);
2893
2894 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2895 &vnd_data);
2896 if (err < 0)
2897 goto error;
2898
2899 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2900 if (!cmd) {
2901 err = -ENOMEM;
2902 goto error;
2903 }
2904
2905 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2906 if (err < 0)
2907 goto error;
2908
2909 cmd->vnd_len = vnd_len;
2910 memcpy(cmd->vnd_data, vnd_data, vnd_len);
2911
2912 cmd->direction = 0x00;
2913 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2914
2915 cmd->direction = 0x01;
2916 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2917
2918 err = hci_req_run(&req, config_data_path_complete);
2919error:
2920
2921 kfree(cmd);
2922 kfree(vnd_data);
2923 return err;
2924}
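/* Illustrative usage sketch, not part of the original driver: a caller that
 * wants an offloaded (e)SCO data path would pick a codec and hand it to
 * hci_req_configure_datapath() before setting up the sync connection. The
 * codec id and data path values below are hypothetical placeholders.
 */
#if 0
static int example_configure_offload(struct hci_dev *hdev)
{
	struct bt_codec codec = {
		.id = 0x05,		/* hypothetical codec id */
		.data_path = 0x01,	/* hypothetical vendor data path id */
	};

	/* Both driver callbacks used by hci_req_configure_datapath() must
	 * be provided by the underlying driver.
	 */
	if (!hdev->get_codec_config_data || !hdev->get_data_path_id)
		return -EOPNOTSUPP;

	return hci_req_configure_datapath(hdev, &codec);
}
#endif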
2925
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002926static int stop_discovery(struct hci_request *req, unsigned long opt)
2927{
2928 hci_dev_lock(req->hdev);
2929 hci_req_stop_discovery(req);
2930 hci_dev_unlock(req->hdev);
2931
2932 return 0;
2933}
2934
Johan Hedberge68f0722015-11-11 08:30:30 +02002935static void discov_update(struct work_struct *work)
2936{
2937 struct hci_dev *hdev = container_of(work, struct hci_dev,
2938 discov_update);
2939 u8 status = 0;
2940
2941 switch (hdev->discovery.state) {
2942 case DISCOVERY_STARTING:
2943 start_discovery(hdev, &status);
2944 mgmt_start_discovery_complete(hdev, status);
2945 if (status)
2946 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2947 else
2948 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2949 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002950 case DISCOVERY_STOPPING:
2951 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2952 mgmt_stop_discovery_complete(hdev, status);
2953 if (!status)
2954 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2955 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002956 case DISCOVERY_STOPPED:
2957 default:
2958 return;
2959 }
2960}
2961
Johan Hedbergc366f552015-11-23 15:43:06 +02002962static void discov_off(struct work_struct *work)
2963{
2964 struct hci_dev *hdev = container_of(work, struct hci_dev,
2965 discov_off.work);
2966
Howard Chung22fbcfc2020-11-11 15:02:19 +08002967 bt_dev_dbg(hdev, "");
Johan Hedbergc366f552015-11-23 15:43:06 +02002968
2969 hci_dev_lock(hdev);
2970
2971	/* When the discoverable timeout triggers, just make sure that
2972	 * the limited discoverable flag is cleared. Even when the timeout
2973	 * was triggered from general discoverable mode, it is safe to
2974	 * unconditionally clear the flag.
2975 */
2976 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2977 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2978 hdev->discov_timeout = 0;
2979
2980 hci_dev_unlock(hdev);
2981
2982 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2983 mgmt_new_settings(hdev);
2984}
2985
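/* Bring the controller configuration back in sync with the host settings
 * after power on: SSP/SC host support, LE host support, advertising data
 * and instances, link security, and the BR/EDR scan, class, name and EIR
 * state are all re-applied here.
 */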
Johan Hedberg2ff13892015-11-25 16:15:44 +02002986static int powered_update_hci(struct hci_request *req, unsigned long opt)
2987{
2988 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002989 u8 link_sec;
2990
2991 hci_dev_lock(hdev);
2992
2993 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2994 !lmp_host_ssp_capable(hdev)) {
2995 u8 mode = 0x01;
2996
2997 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2998
2999 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3000 u8 support = 0x01;
3001
3002 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3003 sizeof(support), &support);
3004 }
3005 }
3006
3007 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3008 lmp_bredr_capable(hdev)) {
3009 struct hci_cp_write_le_host_supported cp;
3010
3011 cp.le = 0x01;
3012 cp.simul = 0x00;
3013
3014 /* Check first if we already have the right
3015 * host state (host features set)
3016 */
3017 if (cp.le != lmp_host_le_capable(hdev) ||
3018 cp.simul != lmp_host_le_br_capable(hdev))
3019 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3020 sizeof(cp), &cp);
3021 }
3022
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003023 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02003024 /* Make sure the controller has a good default for
3025 * advertising data. This also applies to the case
3026 * where BR/EDR was toggled during the AUTO_OFF phase.
3027 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003028 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3029 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303030 int err;
3031
3032 if (ext_adv_capable(hdev)) {
3033 err = __hci_req_setup_ext_adv_instance(req,
3034 0x00);
3035 if (!err)
3036 __hci_req_update_scan_rsp_data(req,
3037 0x00);
3038 } else {
3039 err = 0;
3040 __hci_req_update_adv_data(req, 0x00);
3041 __hci_req_update_scan_rsp_data(req, 0x00);
3042 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003043
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303044 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303045 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303046 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303047 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03003048 __hci_req_enable_ext_advertising(req,
3049 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303050 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003051 } else if (!list_empty(&hdev->adv_instances)) {
3052 struct adv_info *adv_instance;
3053
Johan Hedberg2ff13892015-11-25 16:15:44 +02003054 adv_instance = list_first_entry(&hdev->adv_instances,
3055 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02003056 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003057 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02003058 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003059 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003060 }
3061
3062 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3063 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3064 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3065 sizeof(link_sec), &link_sec);
3066
3067 if (lmp_bredr_capable(hdev)) {
3068 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3069 __hci_req_write_fast_connectable(req, true);
3070 else
3071 __hci_req_write_fast_connectable(req, false);
3072 __hci_req_update_scan(req);
3073 __hci_req_update_class(req);
3074 __hci_req_update_name(req);
3075 __hci_req_update_eir(req);
3076 }
3077
3078 hci_dev_unlock(hdev);
3079 return 0;
3080}
3081
3082int __hci_req_hci_power_on(struct hci_dev *hdev)
3083{
3084 /* Register the available SMP channels (BR/EDR and LE) only when
3085 * successfully powering on the controller. This late
3086 * registration is required so that LE SMP can clearly decide if
3087 * the public address or static address is used.
3088 */
3089 smp_register(hdev);
3090
3091 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3092 NULL);
3093}
3094
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003095void hci_request_setup(struct hci_dev *hdev)
3096{
Johan Hedberge68f0722015-11-11 08:30:30 +02003097 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003098 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003099 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003100 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003101 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02003102 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003103 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3104 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02003105 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Howard Chungc4f1f402020-11-26 12:22:21 +08003106 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003107}
3108
3109void hci_request_cancel_all(struct hci_dev *hdev)
3110{
Johan Hedberg7df0f732015-11-12 15:15:00 +02003111 hci_req_sync_cancel(hdev, ENODEV);
3112
Johan Hedberge68f0722015-11-11 08:30:30 +02003113 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003114 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003115 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003116 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003117 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02003118 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003119 cancel_delayed_work_sync(&hdev->le_scan_disable);
3120 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02003121
3122 if (hdev->adv_instance_timeout) {
3123 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3124 hdev->adv_instance_timeout = 0;
3125 }
Howard Chungc4f1f402020-11-26 12:22:21 +08003126
3127 cancel_interleave_scan(hdev);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003128}