/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

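/* Splice the commands queued on a request onto the device command queue and
 * kick hdev->cmd_work. The completion callback (if any) is attached to the
 * last command so it runs once the whole request has been processed.
 */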
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

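/* Completion handler used by the synchronous request helpers below: record
 * the result, keep a reference to the response skb (if any) and wake up the
 * waiter sleeping on hdev->req_wait_q.
 */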
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

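/* Send a single HCI command and block until the requested event arrives or
 * the timeout expires. Returns the event skb on success or an ERR_PTR() on
 * failure.
 */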
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect HCI_UP
	 * against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

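/* Allocate and build an skb containing a single HCI command (header plus
 * parameters). Returns NULL if the allocation fails.
 */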
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

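/* Update page scan activity and type so BR/EDR connections are set up faster
 * while "fast connectable" mode is enabled, and restore the controller
 * defaults when it is disabled.
 */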
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleaved scan was started as a result of this call,
 * false otherwise.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to be
		 * scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

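/* Queue commands to remove a device from the controller accept list and,
 * when LL privacy is in use and an IRK is known for it, from the resolving
 * list as well.
 */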
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

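/* Rebuild the controller accept list from the pending connection and report
 * lists and return the scan filter policy to use: 0x01 when the accept list
 * can be used, 0x00 when scanning must accept all advertisements instead.
 */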
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return the filter policy value to not use the
	 * accept list.
	 */
743 list_for_each_entry(params, &hdev->pend_le_conns, action) {
Archie Pusaka3d4f9c02021-06-04 16:26:27 +0800744 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
Johan Hedberg0857dd32014-12-19 13:40:20 +0200745 return 0x00;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200746 }
747
748 /* After adding all new pending connections, walk through
749 * the list of pending reports and also add these to the
Archie Pusaka3d4f9c02021-06-04 16:26:27 +0800750 * accept list if there is still space. Abort if space runs out.
Johan Hedberg0857dd32014-12-19 13:40:20 +0200751 */
752 list_for_each_entry(params, &hdev->pend_le_reports, action) {
Archie Pusaka3d4f9c02021-06-04 16:26:27 +0800753 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
Johan Hedberg0857dd32014-12-19 13:40:20 +0200754 return 0x00;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200755 }
756
Howard Chungc4f1f402020-11-26 12:22:21 +0800757 /* Use the allowlist unless the following conditions are all true:
758 * - We are not currently suspending
Archie Pusaka58ceb1e2021-01-22 16:36:16 +0800759 * - There are 1 or more ADV monitors registered and it's not offloaded
Howard Chungc4f1f402020-11-26 12:22:21 +0800760 * - Interleaved scanning is not currently using the allowlist
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200761 */
Howard Chungc4f1f402020-11-26 12:22:21 +0800762 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
Archie Pusaka58ceb1e2021-01-22 16:36:16 +0800763 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
Howard Chungc4f1f402020-11-26 12:22:21 +0800764 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
Miao-chen Chou8208f5a2020-06-17 16:39:18 +0200765 return 0x00;
766
Archie Pusaka3d4f9c02021-06-04 16:26:27 +0800767 /* Select filter policy to use accept list */
Johan Hedberg0857dd32014-12-19 13:40:20 +0200768 return 0x01;
769}
770
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200771static bool scan_use_rpa(struct hci_dev *hdev)
772{
773 return hci_dev_test_flag(hdev, HCI_PRIVACY);
774}
775
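/* Program the LE scan parameters and enable scanning. The extended scan
 * commands are used when the controller supports them; otherwise the legacy
 * scan parameter and scan enable commands are queued.
 */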
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

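/* Clear all BR/EDR event filters previously programmed into the controller.
 * This is only done when BR/EDR is enabled and a filter had actually been
 * configured.
 */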
static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
		memset(&f, 0, sizeof(f));
		f.flt_type = HCI_FLT_CLEAR_ALL;
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
	}
}

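/* Program connection-setup event filters for every device on the BR/EDR
 * accept list that is marked as wake-capable, and update page scan to match
 * whether any such filter was added.
 */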
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan && !scanning) {
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	} else if (!scan && scanning) {
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
					      bool enable)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_req_add_set_filter_enable(req, enable);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && !enable)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;

		/* Disable page scan if enabled */
		if (test_bit(HCI_PSCAN, &hdev->flags)) {
			page_scan = SCAN_DISABLED;
			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
				    &page_scan);
			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		}

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Disable advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, false);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		__hci_update_background_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		/* Clear any event filters and restore scan state */
		hci_req_clear_event_filter(&req);
		__hci_req_update_scan(&req);

		/* Reset passive/background scanning to normal */
		__hci_update_background_scan(&req);
		/* Enable all of the advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, true);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

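/* Check the LE supported states (le_states) against the roles of the current
 * connections to decide whether advertising of the requested type is allowed
 * while those connections exist.
 */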
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

Johan Hedbergf2252572015-11-18 12:49:20 +02001345void __hci_req_enable_advertising(struct hci_request *req)
1346{
1347 struct hci_dev *hdev = req->hdev;
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001348 struct adv_info *adv;
Johan Hedbergf2252572015-11-18 12:49:20 +02001349 struct hci_cp_le_set_adv_param cp;
1350 u8 own_addr_type, enable = 0x01;
1351 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301352 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001353 u32 flags;
1354
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001355 flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1356 adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001357
1358 /* If the "connectable" instance flag was not set, then choose between
1359 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1360 */
1361 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1362 mgmt_get_connectable(hdev);
1363
1364 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001365 return;
1366
1367 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1368 __hci_req_disable_advertising(req);
1369
1370 /* Clear the HCI_LE_ADV bit temporarily so that the
1371 * hci_update_random_address knows that it's safe to go ahead
1372 * and write a new random address. The flag will be set back on
1373 * as soon as the SET_ADV_ENABLE HCI command completes.
1374 */
1375 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1376
Johan Hedbergf2252572015-11-18 12:49:20 +02001377 /* Set require_privacy to true only when non-connectable
1378 * advertising is used. In that case it is fine to use a
1379 * non-resolvable private address.
1380 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001381 if (hci_update_random_address(req, !connectable,
1382 adv_use_rpa(hdev, flags),
1383 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001384 return;
1385
1386 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001387
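	/* Use the per-instance advertising intervals when an instance is
	 * configured, otherwise fall back to the controller-wide defaults.
	 */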
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001388 if (adv) {
1389 adv_min_interval = adv->min_interval;
1390 adv_max_interval = adv->max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001391 } else {
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301392 adv_min_interval = hdev->le_adv_min_interval;
1393 adv_max_interval = hdev->le_adv_max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001394 }
1395
1396 if (connectable) {
1397 cp.type = LE_ADV_IND;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301398 } else {
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001399 if (adv_cur_instance_is_scannable(hdev))
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301400 cp.type = LE_ADV_SCAN_IND;
1401 else
1402 cp.type = LE_ADV_NONCONN_IND;
1403
1404 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1405 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1406 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1407 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301408 }
1409 }
1410
1411 cp.min_interval = cpu_to_le16(adv_min_interval);
1412 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001413 cp.own_address_type = own_addr_type;
1414 cp.channel_map = hdev->le_adv_channel_map;
1415
1416 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1417
1418 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1419}
1420
Johan Hedbergcab054a2015-11-30 11:21:45 +02001421void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001422{
1423 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001424 u8 len;
1425
1426 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1427 return;
1428
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301429 if (ext_adv_capable(hdev)) {
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001430 struct {
1431 struct hci_cp_le_set_ext_scan_rsp_data cp;
1432 u8 data[HCI_MAX_EXT_AD_LENGTH];
1433 } pdu;
Johan Hedbergf2252572015-11-18 12:49:20 +02001434
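		/* Build the command header and its variable-length data in a
		 * single buffer so the whole PDU can be queued with one
		 * hci_req_add() call below.
		 */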
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001435 memset(&pdu, 0, sizeof(pdu));
Johan Hedbergf2252572015-11-18 12:49:20 +02001436
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001437 len = eir_create_scan_rsp(hdev, instance, pdu.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001438
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301439 if (hdev->scan_rsp_data_len == len &&
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001440 !memcmp(pdu.data, hdev->scan_rsp_data, len))
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301441 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001442
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001443 memcpy(hdev->scan_rsp_data, pdu.data, len);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301444 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001445
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001446 pdu.cp.handle = instance;
1447 pdu.cp.length = len;
1448 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1449 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301450
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001451 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1452 sizeof(pdu.cp) + len, &pdu.cp);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301453 } else {
1454 struct hci_cp_le_set_scan_rsp_data cp;
1455
1456 memset(&cp, 0, sizeof(cp));
1457
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001458 len = eir_create_scan_rsp(hdev, instance, cp.data);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301459
1460 if (hdev->scan_rsp_data_len == len &&
1461 !memcmp(cp.data, hdev->scan_rsp_data, len))
1462 return;
1463
1464 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1465 hdev->scan_rsp_data_len = len;
1466
1467 cp.length = len;
1468
1469 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1470 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001471}
1472
Johan Hedbergcab054a2015-11-30 11:21:45 +02001473void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001474{
1475 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001476 u8 len;
1477
1478 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1479 return;
1480
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301481 if (ext_adv_capable(hdev)) {
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001482 struct {
1483 struct hci_cp_le_set_ext_adv_data cp;
1484 u8 data[HCI_MAX_EXT_AD_LENGTH];
1485 } pdu;
Johan Hedbergf2252572015-11-18 12:49:20 +02001486
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001487 memset(&pdu, 0, sizeof(pdu));
Johan Hedbergf2252572015-11-18 12:49:20 +02001488
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001489 len = eir_create_adv_data(hdev, instance, pdu.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001490
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301491 /* There's nothing to do if the data hasn't changed */
1492 if (hdev->adv_data_len == len &&
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001493 memcmp(pdu.data, hdev->adv_data, len) == 0)
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301494 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001495
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001496 memcpy(hdev->adv_data, pdu.data, len);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301497 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001498
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001499 pdu.cp.length = len;
1500 pdu.cp.handle = instance;
1501 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1502 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301503
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001504 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1505 sizeof(pdu.cp) + len, &pdu.cp);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301506 } else {
1507 struct hci_cp_le_set_adv_data cp;
1508
1509 memset(&cp, 0, sizeof(cp));
1510
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001511 len = eir_create_adv_data(hdev, instance, cp.data);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301512
1513 /* There's nothing to do if the data hasn't changed */
1514 if (hdev->adv_data_len == len &&
1515 memcmp(cp.data, hdev->adv_data, len) == 0)
1516 return;
1517
1518 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1519 hdev->adv_data_len = len;
1520
1521 cp.length = len;
1522
1523 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1524 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001525}
1526
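/* Convenience wrapper: build a standalone request that refreshes the
 * advertising data for @instance and submit it right away.  For example,
 * after the local name changes a caller could refresh the current instance
 * with hci_req_update_adv_data(hdev, hdev->cur_adv_instance) (illustrative
 * only, not an exact call site).
 */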
Johan Hedbergcab054a2015-11-30 11:21:45 +02001527int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001528{
1529 struct hci_request req;
1530
1531 hci_req_init(&req, hdev);
1532 __hci_req_update_adv_data(&req, instance);
1533
1534 return hci_req_run(&req, NULL);
1535}
1536
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301537static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1538 u16 opcode)
1539{
1540 BT_DBG("%s status %u", hdev->name, status);
1541}
1542
1543void hci_req_disable_address_resolution(struct hci_dev *hdev)
1544{
1545 struct hci_request req;
1546 __u8 enable = 0x00;
1547
1548 if (!use_ll_privacy(hdev) &&
1549 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1550 return;
1551
1552 hci_req_init(&req, hdev);
1553
1554 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1555
1556 hci_req_run(&req, enable_addr_resolution_complete);
1557}
1558
Johan Hedbergf2252572015-11-18 12:49:20 +02001559static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1560{
Howard Chung22fbcfc2020-11-11 15:02:19 +08001561 bt_dev_dbg(hdev, "status %u", status);
Johan Hedbergf2252572015-11-18 12:49:20 +02001562}
1563
1564void hci_req_reenable_advertising(struct hci_dev *hdev)
1565{
1566 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001567
1568 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001569 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001570 return;
1571
Johan Hedbergf2252572015-11-18 12:49:20 +02001572 hci_req_init(&req, hdev);
1573
Johan Hedbergcab054a2015-11-30 11:21:45 +02001574 if (hdev->cur_adv_instance) {
1575 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1576 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001577 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301578 if (ext_adv_capable(hdev)) {
1579 __hci_req_start_ext_adv(&req, 0x00);
1580 } else {
1581 __hci_req_update_adv_data(&req, 0x00);
1582 __hci_req_update_scan_rsp_data(&req, 0x00);
1583 __hci_req_enable_advertising(&req);
1584 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001585 }
1586
1587 hci_req_run(&req, adv_enable_complete);
1588}
1589
1590static void adv_timeout_expire(struct work_struct *work)
1591{
1592 struct hci_dev *hdev = container_of(work, struct hci_dev,
1593 adv_instance_expire.work);
1594
1595 struct hci_request req;
1596 u8 instance;
1597
Howard Chung22fbcfc2020-11-11 15:02:19 +08001598 bt_dev_dbg(hdev, "");
Johan Hedbergf2252572015-11-18 12:49:20 +02001599
1600 hci_dev_lock(hdev);
1601
1602 hdev->adv_instance_timeout = 0;
1603
Johan Hedbergcab054a2015-11-30 11:21:45 +02001604 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001605 if (instance == 0x00)
1606 goto unlock;
1607
1608 hci_req_init(&req, hdev);
1609
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001610 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001611
1612 if (list_empty(&hdev->adv_instances))
1613 __hci_req_disable_advertising(&req);
1614
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001615 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001616
1617unlock:
1618 hci_dev_unlock(hdev);
1619}
1620
Howard Chungc4f1f402020-11-26 12:22:21 +08001621static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1622 unsigned long opt)
1623{
1624 struct hci_dev *hdev = req->hdev;
1625 int ret = 0;
1626
1627 hci_dev_lock(hdev);
1628
1629 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1630 hci_req_add_le_scan_disable(req, false);
1631 hci_req_add_le_passive_scan(req);
1632
1633 switch (hdev->interleave_scan_state) {
1634 case INTERLEAVE_SCAN_ALLOWLIST:
1635 bt_dev_dbg(hdev, "next state: allowlist");
1636 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1637 break;
1638 case INTERLEAVE_SCAN_NO_FILTER:
1639 bt_dev_dbg(hdev, "next state: no filter");
1640 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1641 break;
1642 case INTERLEAVE_SCAN_NONE:
1643 BT_ERR("unexpected error");
1644 ret = -1;
1645 }
1646
1647 hci_dev_unlock(hdev);
1648
1649 return ret;
1650}
1651
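/* Worker that alternates passive scanning between the allowlist-filtered
 * and unfiltered configurations, using the durations configured in hdev and
 * rescheduling itself for as long as interleaved scanning remains enabled.
 */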
1652static void interleave_scan_work(struct work_struct *work)
1653{
1654 struct hci_dev *hdev = container_of(work, struct hci_dev,
1655 interleave_scan.work);
1656 u8 status;
1657 unsigned long timeout;
1658
1659 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1660 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1661 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1662 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1663 } else {
1664 bt_dev_err(hdev, "unexpected error");
1665 return;
1666 }
1667
1668 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1669 HCI_CMD_TIMEOUT, &status);
1670
1671 /* Don't continue interleaving if it was canceled */
1672 if (is_interleave_scanning(hdev))
1673 queue_delayed_work(hdev->req_workqueue,
1674 &hdev->interleave_scan, timeout);
1675}
1676
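/* Work out the own-address type and, where needed, the random address an
 * advertising instance should use: an RPA when privacy is in use, an NRPA
 * when privacy is only required for non-connectable advertising, or the
 * public address otherwise.
 */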
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301677int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1678 bool use_rpa, struct adv_info *adv_instance,
1679 u8 *own_addr_type, bdaddr_t *rand_addr)
1680{
1681 int err;
1682
1683 bacpy(rand_addr, BDADDR_ANY);
1684
1685 /* If privacy is enabled use a resolvable private address. If
1686 * current RPA has expired then generate a new one.
1687 */
1688 if (use_rpa) {
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301689		/* If the controller supports LL Privacy, use own address
1690		 * type 0x03 (resolvable private address).
1691		 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05301692 if (use_ll_privacy(hdev) &&
1693 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301694 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1695 else
1696 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301697
1698 if (adv_instance) {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001699 if (adv_rpa_valid(adv_instance))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301700 return 0;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301701 } else {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001702 if (rpa_valid(hdev))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301703 return 0;
1704 }
1705
1706 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1707 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001708 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301709 return err;
1710 }
1711
1712 bacpy(rand_addr, &hdev->rpa);
1713
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301714 return 0;
1715 }
1716
1717	/* In case privacy is required but a resolvable private address is
1718	 * not in use, fall back to a non-resolvable private address. This
1719	 * is useful for non-connectable advertising.
1720 */
1721 if (require_privacy) {
1722 bdaddr_t nrpa;
1723
1724 while (true) {
1725 /* The non-resolvable private address is generated
1726			 * from six random bytes with the two most significant
1727 * bits cleared.
1728 */
1729 get_random_bytes(&nrpa, 6);
1730 nrpa.b[5] &= 0x3f;
1731
1732 /* The non-resolvable private address shall not be
1733 * equal to the public address.
1734 */
1735 if (bacmp(&hdev->bdaddr, &nrpa))
1736 break;
1737 }
1738
1739 *own_addr_type = ADDR_LE_DEV_RANDOM;
1740 bacpy(rand_addr, &nrpa);
1741
1742 return 0;
1743 }
1744
1745 /* No privacy so use a public address. */
1746 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1747
1748 return 0;
1749}
1750
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301751void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1752{
1753 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1754}
1755
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001756static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1757{
1758 struct hci_dev *hdev = req->hdev;
1759
1760 /* If we're advertising or initiating an LE connection we can't
1761 * go ahead and change the random address at this time. This is
1762 * because the eventual initiator address used for the
1763 * subsequently created connection will be undefined (some
1764 * controllers use the new address and others the one we had
1765 * when the operation started).
1766 *
1767 * In this kind of scenario skip the update and let the random
1768 * address be updated at the next cycle.
1769 */
1770 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1771 hci_lookup_le_connect(hdev)) {
1772 bt_dev_dbg(hdev, "Deferring random address update");
1773 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1774 return;
1775 }
1776
1777 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1778}
1779
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301780int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301781{
1782 struct hci_cp_le_set_ext_adv_params cp;
1783 struct hci_dev *hdev = req->hdev;
1784 bool connectable;
1785 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301786 bdaddr_t random_addr;
1787 u8 own_addr_type;
1788 int err;
1789 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301790 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301791
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301792 if (instance > 0) {
1793 adv_instance = hci_find_adv_instance(hdev, instance);
1794 if (!adv_instance)
1795 return -EINVAL;
1796 } else {
1797 adv_instance = NULL;
1798 }
1799
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001800 flags = hci_adv_instance_flags(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301801
1802 /* If the "connectable" instance flag was not set, then choose between
1803 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1804 */
1805 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1806 mgmt_get_connectable(hdev);
1807
Colin Ian King75edd1f2018-11-09 13:27:36 +00001808 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301809 return -EPERM;
1810
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301811 /* Set require_privacy to true only when non-connectable
1812 * advertising is used. In that case it is fine to use a
1813 * non-resolvable private address.
1814 */
1815 err = hci_get_random_address(hdev, !connectable,
1816 adv_use_rpa(hdev, flags), adv_instance,
1817 &own_addr_type, &random_addr);
1818 if (err < 0)
1819 return err;
1820
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301821 memset(&cp, 0, sizeof(cp));
1822
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001823 if (adv_instance) {
1824 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1825 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1826 cp.tx_power = adv_instance->tx_power;
1827 } else {
1828 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1829 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1830 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1831 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301832
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301833 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1834
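	/* Translate the connectable/scannable flags into extended advertising
	 * event properties, using the legacy-PDU variants when no secondary
	 * PHY flag was requested.
	 */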
1835 if (connectable) {
1836 if (secondary_adv)
1837 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1838 else
1839 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001840 } else if (hci_adv_instance_is_scannable(hdev, instance) ||
Daniel Winklerff02db12021-03-03 11:15:23 -08001841 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301842 if (secondary_adv)
1843 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1844 else
1845 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1846 } else {
1847 if (secondary_adv)
1848 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1849 else
1850 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1851 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301852
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301853 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301854 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001855 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301856
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301857 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1858 cp.primary_phy = HCI_ADV_PHY_1M;
1859 cp.secondary_phy = HCI_ADV_PHY_2M;
1860 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1861 cp.primary_phy = HCI_ADV_PHY_CODED;
1862 cp.secondary_phy = HCI_ADV_PHY_CODED;
1863 } else {
1864 /* In all other cases use 1M */
1865 cp.primary_phy = HCI_ADV_PHY_1M;
1866 cp.secondary_phy = HCI_ADV_PHY_1M;
1867 }
1868
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301869 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1870
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301871 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1872 bacmp(&random_addr, BDADDR_ANY)) {
1873 struct hci_cp_le_set_adv_set_rand_addr cp;
1874
1875		/* Check if the random address needs to be updated */
1876 if (adv_instance) {
1877 if (!bacmp(&random_addr, &adv_instance->random_addr))
1878 return 0;
1879 } else {
1880 if (!bacmp(&random_addr, &hdev->random_addr))
1881 return 0;
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001882			/* Instance 0x00 doesn't have an adv_info; it uses
1883			 * hdev->random_addr to track its address, so whenever
1884			 * that address needs to be updated, set the random
1885			 * address here as well, since hdev->random_addr is
1886			 * shared with the scan state machine.
1887			 */
1888 set_random_addr(req, &random_addr);
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301889 }
1890
1891 memset(&cp, 0, sizeof(cp));
1892
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001893 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301894 bacpy(&cp.bdaddr, &random_addr);
1895
1896 hci_req_add(req,
1897 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1898 sizeof(cp), &cp);
1899 }
1900
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301901 return 0;
1902}
1903
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001904int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301905{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001906 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301907 struct hci_cp_le_set_ext_adv_enable *cp;
1908 struct hci_cp_ext_adv_set *adv_set;
1909 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001910 struct adv_info *adv_instance;
1911
1912 if (instance > 0) {
1913 adv_instance = hci_find_adv_instance(hdev, instance);
1914 if (!adv_instance)
1915 return -EINVAL;
1916 } else {
1917 adv_instance = NULL;
1918 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301919
1920 cp = (void *) data;
1921 adv_set = (void *) cp->data;
1922
1923 memset(cp, 0, sizeof(*cp));
1924
1925 cp->enable = 0x01;
1926 cp->num_of_sets = 0x01;
1927
1928 memset(adv_set, 0, sizeof(*adv_set));
1929
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001930 adv_set->handle = instance;
1931
1932 /* Set duration per instance since controller is responsible for
1933 * scheduling it.
1934 */
1935 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001936 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001937
1938 /* Time = N * 10 ms */
1939 adv_set->duration = cpu_to_le16(duration / 10);
1940 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301941
1942 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1943 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1944 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001945
1946 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301947}
1948
Daniel Winkler37adf702020-07-14 14:16:00 -07001949int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1950{
1951 struct hci_dev *hdev = req->hdev;
1952 struct hci_cp_le_set_ext_adv_enable *cp;
1953 struct hci_cp_ext_adv_set *adv_set;
1954 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1955 u8 req_size;
1956
1957 /* If request specifies an instance that doesn't exist, fail */
1958 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1959 return -EINVAL;
1960
1961 memset(data, 0, sizeof(data));
1962
1963 cp = (void *)data;
1964 adv_set = (void *)cp->data;
1965
1966 /* Instance 0x00 indicates all advertising instances will be disabled */
1967 cp->num_of_sets = !!instance;
1968 cp->enable = 0x00;
1969
1970 adv_set->handle = instance;
1971
1972 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1973 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1974
1975 return 0;
1976}
1977
1978int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1979{
1980 struct hci_dev *hdev = req->hdev;
1981
1982 /* If request specifies an instance that doesn't exist, fail */
1983 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1984 return -EINVAL;
1985
1986 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1987
1988 return 0;
1989}
1990
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301991int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1992{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301993 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07001994 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301995 int err;
1996
Daniel Winkler37adf702020-07-14 14:16:00 -07001997	/* If the instance isn't pending, the controller already knows about
1998	 * it, so it is safe to disable it before reconfiguring.
1999	 */
2000 if (adv_instance && !adv_instance->pending)
2001 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302002
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302003 err = __hci_req_setup_ext_adv_instance(req, instance);
2004 if (err < 0)
2005 return err;
2006
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302007 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002008 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302009
2010 return 0;
2011}
2012
Johan Hedbergf2252572015-11-18 12:49:20 +02002013int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2014 bool force)
2015{
2016 struct hci_dev *hdev = req->hdev;
2017 struct adv_info *adv_instance = NULL;
2018 u16 timeout;
2019
2020 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002021 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002022 return -EPERM;
2023
2024 if (hdev->adv_instance_timeout)
2025 return -EBUSY;
2026
2027 adv_instance = hci_find_adv_instance(hdev, instance);
2028 if (!adv_instance)
2029 return -ENOENT;
2030
2031 /* A zero timeout means unlimited advertising. As long as there is
2032 * only one instance, duration should be ignored. We still set a timeout
2033 * in case further instances are being added later on.
2034 *
2035 * If the remaining lifetime of the instance is more than the duration
2036 * then the timeout corresponds to the duration, otherwise it will be
2037 * reduced to the remaining instance lifetime.
2038 */
2039 if (adv_instance->timeout == 0 ||
2040 adv_instance->duration <= adv_instance->remaining_time)
2041 timeout = adv_instance->duration;
2042 else
2043 timeout = adv_instance->remaining_time;
2044
2045 /* The remaining time is being reduced unless the instance is being
2046 * advertised without time limit.
2047 */
2048 if (adv_instance->timeout)
2049 adv_instance->remaining_time =
2050 adv_instance->remaining_time - timeout;
2051
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002052 /* Only use work for scheduling instances with legacy advertising */
2053 if (!ext_adv_capable(hdev)) {
2054 hdev->adv_instance_timeout = timeout;
2055 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002056 &hdev->adv_instance_expire,
2057 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002058 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002059
2060 /* If we're just re-scheduling the same instance again then do not
2061 * execute any HCI commands. This happens when a single instance is
2062 * being advertised.
2063 */
2064 if (!force && hdev->cur_adv_instance == instance &&
2065 hci_dev_test_flag(hdev, HCI_LE_ADV))
2066 return 0;
2067
2068 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302069 if (ext_adv_capable(hdev)) {
2070 __hci_req_start_ext_adv(req, instance);
2071 } else {
2072 __hci_req_update_adv_data(req, instance);
2073 __hci_req_update_scan_rsp_data(req, instance);
2074 __hci_req_enable_advertising(req);
2075 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002076
2077 return 0;
2078}
2079
Johan Hedbergf2252572015-11-18 12:49:20 +02002080/* For a single instance:
2081 * - force == true: The instance will be removed even when its remaining
2082 * lifetime is not zero.
2083 * - force == false: the instance will be deactivated but kept stored unless
2084 * the remaining lifetime is zero.
2085 *
2086 * For instance == 0x00:
2087 * - force == true: All instances will be removed regardless of their timeout
2088 * setting.
2089 * - force == false: Only instances that have a timeout will be removed.
2090 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002091void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2092 struct hci_request *req, u8 instance,
2093 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002094{
2095 struct adv_info *adv_instance, *n, *next_instance = NULL;
2096 int err;
2097 u8 rem_inst;
2098
2099 /* Cancel any timeout concerning the removed instance(s). */
2100 if (!instance || hdev->cur_adv_instance == instance)
2101 cancel_adv_timeout(hdev);
2102
2103 /* Get the next instance to advertise BEFORE we remove
2104 * the current one. This can be the same instance again
2105 * if there is only one instance.
2106 */
2107 if (instance && hdev->cur_adv_instance == instance)
2108 next_instance = hci_get_next_instance(hdev, instance);
2109
2110 if (instance == 0x00) {
2111 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2112 list) {
2113 if (!(force || adv_instance->timeout))
2114 continue;
2115
2116 rem_inst = adv_instance->instance;
2117 err = hci_remove_adv_instance(hdev, rem_inst);
2118 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002119 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002120 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002121 } else {
2122 adv_instance = hci_find_adv_instance(hdev, instance);
2123
2124 if (force || (adv_instance && adv_instance->timeout &&
2125 !adv_instance->remaining_time)) {
2126 /* Don't advertise a removed instance. */
2127 if (next_instance &&
2128 next_instance->instance == instance)
2129 next_instance = NULL;
2130
2131 err = hci_remove_adv_instance(hdev, instance);
2132 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002133 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002134 }
2135 }
2136
Johan Hedbergf2252572015-11-18 12:49:20 +02002137 if (!req || !hdev_is_powered(hdev) ||
2138 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2139 return;
2140
Daniel Winkler37adf702020-07-14 14:16:00 -07002141 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002142 __hci_req_schedule_adv_instance(req, next_instance->instance,
2143 false);
2144}
2145
Johan Hedberg0857dd32014-12-19 13:40:20 +02002146int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002147 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002148{
2149 struct hci_dev *hdev = req->hdev;
2150 int err;
2151
2152 /* If privacy is enabled use a resolvable private address. If
2153 * current RPA has expired or there is something else than
2154 * the current RPA in use, then generate a new one.
2155 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002156 if (use_rpa) {
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302157		/* If the controller supports LL Privacy, use own address
2158		 * type 0x03 (resolvable private address).
2159		 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05302160 if (use_ll_privacy(hdev) &&
2161 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302162 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2163 else
2164 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002165
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07002166 if (rpa_valid(hdev))
Johan Hedberg0857dd32014-12-19 13:40:20 +02002167 return 0;
2168
2169 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2170 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002171 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002172 return err;
2173 }
2174
2175 set_random_addr(req, &hdev->rpa);
2176
Johan Hedberg0857dd32014-12-19 13:40:20 +02002177 return 0;
2178 }
2179
2180	/* In case privacy is required but a resolvable private address is
2181	 * not in use, fall back to a non-resolvable private address. This
2182	 * is useful for active scanning and non-connectable advertising.
2183 */
2184 if (require_privacy) {
2185 bdaddr_t nrpa;
2186
2187 while (true) {
2188 /* The non-resolvable private address is generated
2189			 * from six random bytes with the two most significant
2190 * bits cleared.
2191 */
2192 get_random_bytes(&nrpa, 6);
2193 nrpa.b[5] &= 0x3f;
2194
2195 /* The non-resolvable private address shall not be
2196 * equal to the public address.
2197 */
2198 if (bacmp(&hdev->bdaddr, &nrpa))
2199 break;
2200 }
2201
2202 *own_addr_type = ADDR_LE_DEV_RANDOM;
2203 set_random_addr(req, &nrpa);
2204 return 0;
2205 }
2206
2207 /* If forcing static address is in use or there is no public
2208 * address use the static address as random address (but skip
2209 * the HCI command if the current random address is already the
2210	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002211 *
2212 * In case BR/EDR has been disabled on a dual-mode controller
2213 * and a static address has been configured, then use that
2214 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002215 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002216 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002217 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002218 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002219 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002220 *own_addr_type = ADDR_LE_DEV_RANDOM;
2221 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2222 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2223 &hdev->static_addr);
2224 return 0;
2225 }
2226
2227 /* Neither privacy nor static address is being used so use a
2228 * public address.
2229 */
2230 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2231
2232 return 0;
2233}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002234
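/* Return true if any device on the BR/EDR accept list is currently
 * disconnected, in which case page scanning needs to stay enabled so that
 * such a device can reconnect.
 */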
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002235static bool disconnected_accept_list_entries(struct hci_dev *hdev)
Johan Hedberg405a2612014-12-19 23:18:22 +02002236{
2237 struct bdaddr_list *b;
2238
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002239 list_for_each_entry(b, &hdev->accept_list, list) {
Johan Hedberg405a2612014-12-19 23:18:22 +02002240 struct hci_conn *conn;
2241
2242 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2243 if (!conn)
2244 return true;
2245
2246 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2247 return true;
2248 }
2249
2250 return false;
2251}
2252
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002253void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002254{
2255 struct hci_dev *hdev = req->hdev;
2256 u8 scan;
2257
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002258 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002259 return;
2260
2261 if (!hdev_is_powered(hdev))
2262 return;
2263
2264 if (mgmt_powering_down(hdev))
2265 return;
2266
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002267 if (hdev->scanning_paused)
2268 return;
2269
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002270 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002271 disconnected_accept_list_entries(hdev))
Johan Hedberg405a2612014-12-19 23:18:22 +02002272 scan = SCAN_PAGE;
2273 else
2274 scan = SCAN_DISABLED;
2275
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002276 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002277 scan |= SCAN_INQUIRY;
2278
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002279 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2280 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2281 return;
2282
Johan Hedberg405a2612014-12-19 23:18:22 +02002283 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2284}
2285
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002286static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002287{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002288 hci_dev_lock(req->hdev);
2289 __hci_req_update_scan(req);
2290 hci_dev_unlock(req->hdev);
2291 return 0;
2292}
Johan Hedberg405a2612014-12-19 23:18:22 +02002293
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002294static void scan_update_work(struct work_struct *work)
2295{
2296 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2297
2298 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002299}
2300
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002301static int connectable_update(struct hci_request *req, unsigned long opt)
2302{
2303 struct hci_dev *hdev = req->hdev;
2304
2305 hci_dev_lock(hdev);
2306
2307 __hci_req_update_scan(req);
2308
2309 /* If BR/EDR is not enabled and we disable advertising as a
2310 * by-product of disabling connectable, we need to update the
2311 * advertising flags.
2312 */
2313 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002314 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002315
2316 /* Update the advertising parameters if necessary */
2317 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302318 !list_empty(&hdev->adv_instances)) {
2319 if (ext_adv_capable(hdev))
2320 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2321 else
2322 __hci_req_enable_advertising(req);
2323 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002324
2325 __hci_update_background_scan(req);
2326
2327 hci_dev_unlock(hdev);
2328
2329 return 0;
2330}
2331
2332static void connectable_update_work(struct work_struct *work)
2333{
2334 struct hci_dev *hdev = container_of(work, struct hci_dev,
2335 connectable_update);
2336 u8 status;
2337
2338 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2339 mgmt_set_connectable_complete(hdev, status);
2340}
2341
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002342static u8 get_service_classes(struct hci_dev *hdev)
2343{
2344 struct bt_uuid *uuid;
2345 u8 val = 0;
2346
2347 list_for_each_entry(uuid, &hdev->uuids, list)
2348 val |= uuid->svc_hint;
2349
2350 return val;
2351}
2352
2353void __hci_req_update_class(struct hci_request *req)
2354{
2355 struct hci_dev *hdev = req->hdev;
2356 u8 cod[3];
2357
Howard Chung22fbcfc2020-11-11 15:02:19 +08002358 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002359
2360 if (!hdev_is_powered(hdev))
2361 return;
2362
2363 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2364 return;
2365
2366 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2367 return;
2368
2369 cod[0] = hdev->minor_class;
2370 cod[1] = hdev->major_class;
2371 cod[2] = get_service_classes(hdev);
2372
2373 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2374 cod[1] |= 0x20;
2375
2376 if (memcmp(cod, hdev->dev_class, 3) == 0)
2377 return;
2378
2379 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2380}
2381
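/* Program the current Inquiry Access Codes: the LIAC plus the GIAC while
 * limited discoverable mode is active, otherwise just the GIAC.
 */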
Johan Hedbergaed1a882015-11-22 17:24:44 +03002382static void write_iac(struct hci_request *req)
2383{
2384 struct hci_dev *hdev = req->hdev;
2385 struct hci_cp_write_current_iac_lap cp;
2386
2387 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2388 return;
2389
2390 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2391 /* Limited discoverable mode */
2392 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2393 cp.iac_lap[0] = 0x00; /* LIAC */
2394 cp.iac_lap[1] = 0x8b;
2395 cp.iac_lap[2] = 0x9e;
2396 cp.iac_lap[3] = 0x33; /* GIAC */
2397 cp.iac_lap[4] = 0x8b;
2398 cp.iac_lap[5] = 0x9e;
2399 } else {
2400 /* General discoverable mode */
2401 cp.num_iac = 1;
2402 cp.iac_lap[0] = 0x33; /* GIAC */
2403 cp.iac_lap[1] = 0x8b;
2404 cp.iac_lap[2] = 0x9e;
2405 }
2406
2407 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2408 (cp.num_iac * 3) + 1, &cp);
2409}
2410
2411static int discoverable_update(struct hci_request *req, unsigned long opt)
2412{
2413 struct hci_dev *hdev = req->hdev;
2414
2415 hci_dev_lock(hdev);
2416
2417 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2418 write_iac(req);
2419 __hci_req_update_scan(req);
2420 __hci_req_update_class(req);
2421 }
2422
2423 /* Advertising instances don't use the global discoverable setting, so
2424 * only update AD if advertising was enabled using Set Advertising.
2425 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002426 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002427 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002428
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002429 /* Discoverable mode affects the local advertising
2430 * address in limited privacy mode.
2431 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302432 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2433 if (ext_adv_capable(hdev))
2434 __hci_req_start_ext_adv(req, 0x00);
2435 else
2436 __hci_req_enable_advertising(req);
2437 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002438 }
2439
Johan Hedbergaed1a882015-11-22 17:24:44 +03002440 hci_dev_unlock(hdev);
2441
2442 return 0;
2443}
2444
2445static void discoverable_update_work(struct work_struct *work)
2446{
2447 struct hci_dev *hdev = container_of(work, struct hci_dev,
2448 discoverable_update);
2449 u8 status;
2450
2451 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2452 mgmt_set_discoverable_complete(hdev, status);
2453}
2454
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002455void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2456 u8 reason)
2457{
2458 switch (conn->state) {
2459 case BT_CONNECTED:
2460 case BT_CONFIG:
2461 if (conn->type == AMP_LINK) {
2462 struct hci_cp_disconn_phy_link cp;
2463
2464 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2465 cp.reason = reason;
2466 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2467 &cp);
2468 } else {
2469 struct hci_cp_disconnect dc;
2470
2471 dc.handle = cpu_to_le16(conn->handle);
2472 dc.reason = reason;
2473 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2474 }
2475
2476 conn->state = BT_DISCONN;
2477
2478 break;
2479 case BT_CONNECT:
2480 if (conn->type == LE_LINK) {
2481 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2482 break;
2483 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2484 0, NULL);
2485 } else if (conn->type == ACL_LINK) {
2486 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2487 break;
2488 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2489 6, &conn->dst);
2490 }
2491 break;
2492 case BT_CONNECT2:
2493 if (conn->type == ACL_LINK) {
2494 struct hci_cp_reject_conn_req rej;
2495
2496 bacpy(&rej.bdaddr, &conn->dst);
2497 rej.reason = reason;
2498
2499 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2500 sizeof(rej), &rej);
2501 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2502 struct hci_cp_reject_sync_conn_req rej;
2503
2504 bacpy(&rej.bdaddr, &conn->dst);
2505
2506 /* SCO rejection has its own limited set of
2507 * allowed error values (0x0D-0x0F) which isn't
2508 * compatible with most values passed to this
2509 * function. To be safe hard-code one of the
2510 * values that's suitable for SCO.
2511 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002512 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002513
2514 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2515 sizeof(rej), &rej);
2516 }
2517 break;
2518 default:
2519 conn->state = BT_CLOSED;
2520 break;
2521 }
2522}
2523
2524static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2525{
2526 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002527 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002528}
2529
2530int hci_abort_conn(struct hci_conn *conn, u8 reason)
2531{
2532 struct hci_request req;
2533 int err;
2534
2535 hci_req_init(&req, conn->hdev);
2536
2537 __hci_abort_conn(&req, conn, reason);
2538
2539 err = hci_req_run(&req, abort_conn_complete);
2540 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002541 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002542 return err;
2543 }
2544
2545 return 0;
2546}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002547
Johan Hedberga1d01db2015-11-11 08:11:25 +02002548static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002549{
2550 hci_dev_lock(req->hdev);
2551 __hci_update_background_scan(req);
2552 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002553 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002554}
2555
2556static void bg_scan_update(struct work_struct *work)
2557{
2558 struct hci_dev *hdev = container_of(work, struct hci_dev,
2559 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002560 struct hci_conn *conn;
2561 u8 status;
2562 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002563
Johan Hedberg84235d22015-11-11 08:11:20 +02002564 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2565 if (!err)
2566 return;
2567
2568 hci_dev_lock(hdev);
2569
2570 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2571 if (conn)
2572 hci_le_conn_failed(conn, status);
2573
2574 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002575}
2576
Johan Hedberga1d01db2015-11-11 08:11:25 +02002577static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002578{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302579 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002580 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002581}
2582
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002583static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2584{
2585 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002586 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2587 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002588 struct hci_cp_inquiry cp;
2589
Archie Pusaka06752d12021-04-01 11:11:33 +08002590 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2591 return 0;
2592
Howard Chung22fbcfc2020-11-11 15:02:19 +08002593 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002594
2595 hci_dev_lock(req->hdev);
2596 hci_inquiry_cache_flush(req->hdev);
2597 hci_dev_unlock(req->hdev);
2598
2599 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002600
2601 if (req->hdev->discovery.limited)
2602 memcpy(&cp.lap, liac, sizeof(cp.lap));
2603 else
2604 memcpy(&cp.lap, giac, sizeof(cp.lap));
2605
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002606 cp.length = length;
2607
2608 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2609
2610 return 0;
2611}
2612
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002613static void le_scan_disable_work(struct work_struct *work)
2614{
2615 struct hci_dev *hdev = container_of(work, struct hci_dev,
2616 le_scan_disable.work);
2617 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002618
Howard Chung22fbcfc2020-11-11 15:02:19 +08002619 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002620
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002621 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002622 return;
2623
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002624 cancel_delayed_work(&hdev->le_scan_restart);
2625
2626 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2627 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002628 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2629 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002630 return;
2631 }
2632
2633 hdev->discovery.scan_start = 0;
2634
2635	/* If we were running an LE-only scan, change the discovery state.
2636	 * If we were running both LE and BR/EDR inquiry simultaneously,
2637	 * and BR/EDR inquiry is already finished, stop discovery;
2638	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
2639	 * If we are resolving a remote device name, do not change the
2640	 * discovery state.
2641 */
2642
2643 if (hdev->discovery.type == DISCOV_TYPE_LE)
2644 goto discov_stopped;
2645
2646 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2647 return;
2648
2649 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2650 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2651 hdev->discovery.state != DISCOVERY_RESOLVING)
2652 goto discov_stopped;
2653
2654 return;
2655 }
2656
2657 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2658 HCI_CMD_TIMEOUT, &status);
2659 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002660 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002661 goto discov_stopped;
2662 }
2663
2664 return;
2665
2666discov_stopped:
2667 hci_dev_lock(hdev);
2668 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2669 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002670}
2671
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002672static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002673{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002674 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002675
2676 /* If controller is not scanning we are done. */
2677 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2678 return 0;
2679
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002680 if (hdev->scanning_paused) {
2681 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2682 return 0;
2683 }
2684
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302685 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002686
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302687 if (use_ext_scan(hdev)) {
2688 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2689
2690 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2691 ext_enable_cp.enable = LE_SCAN_ENABLE;
2692 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2693
2694 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2695 sizeof(ext_enable_cp), &ext_enable_cp);
2696 } else {
2697 struct hci_cp_le_set_scan_enable cp;
2698
2699 memset(&cp, 0, sizeof(cp));
2700 cp.enable = LE_SCAN_ENABLE;
2701 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2702 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2703 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002704
2705 return 0;
2706}
2707
2708static void le_scan_restart_work(struct work_struct *work)
2709{
2710 struct hci_dev *hdev = container_of(work, struct hci_dev,
2711 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002712 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002713 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002714
Howard Chung22fbcfc2020-11-11 15:02:19 +08002715 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002716
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002717 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002718 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002719 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2720 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002721 return;
2722 }
2723
2724 hci_dev_lock(hdev);
2725
2726 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2727 !hdev->discovery.scan_start)
2728 goto unlock;
2729
2730	/* When the scan was started, hdev->le_scan_disable was queued to
2731	 * run 'duration' after scan_start. During scan restart that work
2732	 * has been cancelled, so queue it again with the proper remaining
2733	 * timeout to make sure the scan does not run indefinitely.
2734	 */
2735 duration = hdev->discovery.scan_duration;
2736 scan_start = hdev->discovery.scan_start;
2737 now = jiffies;
2738 if (now - scan_start <= duration) {
2739 int elapsed;
2740
2741 if (now >= scan_start)
2742 elapsed = now - scan_start;
2743 else
2744 elapsed = ULONG_MAX - scan_start + now;
2745
2746 timeout = duration - elapsed;
2747 } else {
2748 timeout = 0;
2749 }
2750
2751 queue_delayed_work(hdev->req_workqueue,
2752 &hdev->le_scan_disable, timeout);
2753
2754unlock:
2755 hci_dev_unlock(hdev);
2756}
2757
Johan Hedberge68f0722015-11-11 08:30:30 +02002758static int active_scan(struct hci_request *req, unsigned long opt)
2759{
2760 uint16_t interval = opt;
2761 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002762 u8 own_addr_type;
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002763 /* Accept list is not used for discovery */
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002764 u8 filter_policy = 0x00;
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002765	/* By default, enable the duplicate filter */
2766 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302767 /* Discovery doesn't require controller address resolution */
2768 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02002769 int err;
2770
Howard Chung22fbcfc2020-11-11 15:02:19 +08002771 bt_dev_dbg(hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002772
Johan Hedberge68f0722015-11-11 08:30:30 +02002773	/* If the controller is already scanning, background scanning is
2774	 * in progress. Stop it temporarily so the discovery scan
2775	 * parameters can be set.
2776 */
Howard Chung422bb172020-11-26 12:22:23 +08002777 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302778 hci_req_add_le_scan_disable(req, false);
Howard Chung422bb172020-11-26 12:22:23 +08002779 cancel_interleave_scan(hdev);
2780 }
Johan Hedberge68f0722015-11-11 08:30:30 +02002781
2782 /* All active scans will be done with either a resolvable private
2783 * address (when privacy feature has been enabled) or non-resolvable
2784 * private address.
2785 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002786 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2787 &own_addr_type);
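	/* If no suitable own address could be set up, fall back to the
	 * controller's public address for scanning.
	 */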
Johan Hedberge68f0722015-11-11 08:30:30 +02002788 if (err < 0)
2789 own_addr_type = ADDR_LE_DEV_PUBLIC;
2790
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002791 if (hci_is_adv_monitoring(hdev)) {
2792		/* The duplicate filter should be disabled while an advertisement
2793		 * monitor is active; otherwise the monitor can only receive one
2794		 * advertisement per peer during active scanning and might
2795		 * wrongly report those peers as lost.
2796		 *
2797		 * Note that controllers differ in what they consider a
2798		 * duplicate: some treat packets with the same address as
2799		 * duplicates, others only packets with the same address and
2800		 * the same RSSI. In the latter case disabling the filter is not
2801		 * strictly necessary, but since active scanning usually runs
2802		 * only for a short period of time, the extra power impact
2803		 * should be negligible.
2804 */
2805 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2806 }
2807
Alain Michaudd4edda02020-06-29 17:04:15 +00002808 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2809 hdev->le_scan_window_discovery, own_addr_type,
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002810 filter_policy, filter_dup, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02002811 return 0;
2812}
2813
2814static int interleaved_discov(struct hci_request *req, unsigned long opt)
2815{
2816 int err;
2817
Howard Chung22fbcfc2020-11-11 15:02:19 +08002818 bt_dev_dbg(req->hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002819
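	/* Queue LE active scanning and BR/EDR inquiry as part of the same
	 * request; with HCI_QUIRK_SIMULTANEOUS_DISCOVERY the controller
	 * runs both at the same time (see start_discovery() below).
	 */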
2820 err = active_scan(req, opt);
2821 if (err)
2822 return err;
2823
Johan Hedberg7df26b52015-11-11 12:24:21 +02002824 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002825}
2826
2827static void start_discovery(struct hci_dev *hdev, u8 *status)
2828{
2829 unsigned long timeout;
2830
Howard Chung22fbcfc2020-11-11 15:02:19 +08002831 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002832
2833 switch (hdev->discovery.type) {
2834 case DISCOV_TYPE_BREDR:
2835 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002836 hci_req_sync(hdev, bredr_inquiry,
2837 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002838 status);
2839 return;
2840 case DISCOV_TYPE_INTERLEAVED:
2841		/* When running simultaneous discovery, the LE scanning time
2842		 * should occupy the whole discovery time since BR/EDR inquiry
2843		 * and LE scanning are scheduled by the controller.
2844		 *
2845		 * Interleaved discovery, by comparison, runs BR/EDR inquiry
2846		 * and LE scanning sequentially, each with its own
2847		 * timeout.
2848 */
2849 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2850 &hdev->quirks)) {
2851 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2852			/* During simultaneous discovery we double the LE scan
2853			 * interval to leave the controller enough time to also
2854			 * perform BR/EDR inquiry.
2855 */
2856 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00002857 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002858 status);
2859 break;
2860 }
2861
2862 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00002863 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002864 HCI_CMD_TIMEOUT, status);
2865 break;
2866 case DISCOV_TYPE_LE:
2867 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00002868 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002869 HCI_CMD_TIMEOUT, status);
2870 break;
2871 default:
2872 *status = HCI_ERROR_UNSPECIFIED;
2873 return;
2874 }
2875
2876 if (*status)
2877 return;
2878
Howard Chung22fbcfc2020-11-11 15:02:19 +08002879 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
Johan Hedberge68f0722015-11-11 08:30:30 +02002880
2881 /* When service discovery is used and the controller has a
2882 * strict duplicate filter, it is important to remember the
2883 * start and duration of the scan. This is required for
2884 * restarting scanning during the discovery phase.
2885 */
2886 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2887 hdev->discovery.result_filtering) {
2888 hdev->discovery.scan_start = jiffies;
2889 hdev->discovery.scan_duration = timeout;
2890 }
2891
2892 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2893 timeout);
2894}
2895
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002896bool hci_req_stop_discovery(struct hci_request *req)
2897{
2898 struct hci_dev *hdev = req->hdev;
2899 struct discovery_state *d = &hdev->discovery;
2900 struct hci_cp_remote_name_req_cancel cp;
2901 struct inquiry_entry *e;
2902 bool ret = false;
2903
Howard Chung22fbcfc2020-11-11 15:02:19 +08002904 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002905
2906 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2907 if (test_bit(HCI_INQUIRY, &hdev->flags))
2908 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2909
2910 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
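			/* Cancel the scheduled disable/restart work before
			 * turning LE scanning off in this request.
			 */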
2911 cancel_delayed_work(&hdev->le_scan_disable);
Sonny Sasakac06632a2021-03-15 10:30:59 -07002912 cancel_delayed_work(&hdev->le_scan_restart);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302913 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002914 }
2915
2916 ret = true;
2917 } else {
2918 /* Passive scanning */
2919 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302920 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002921 ret = true;
2922 }
2923 }
2924
2925 /* No further actions needed for LE-only discovery */
2926 if (d->type == DISCOV_TYPE_LE)
2927 return ret;
2928
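	/* If BR/EDR name resolution is still in progress, also cancel the
	 * pending remote name request.
	 */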
2929 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2930 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2931 NAME_PENDING);
2932 if (!e)
2933 return ret;
2934
2935 bacpy(&cp.bdaddr, &e->data.bdaddr);
2936 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2937 &cp);
2938 ret = true;
2939 }
2940
2941 return ret;
2942}
2943
Kiran K9798fbd2021-09-07 15:42:44 +05302944static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2945 u16 opcode)
2946{
2947 bt_dev_dbg(hdev, "status %u", status);
2948}
2949
2950int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2951{
2952 struct hci_request req;
2953 int err;
2954 __u8 vnd_len, *vnd_data = NULL;
2955 struct hci_op_configure_data_path *cmd = NULL;
2956
2957 hci_req_init(&req, hdev);
2958
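	/* Ask the driver for vendor-specific codec configuration data;
	 * vnd_data is allocated by the callback and freed on exit below.
	 */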
2959 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2960 &vnd_data);
2961 if (err < 0)
2962 goto error;
2963
2964 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2965 if (!cmd) {
2966 err = -ENOMEM;
2967 goto error;
2968 }
2969
2970 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2971 if (err < 0)
2972 goto error;
2973
2974 cmd->vnd_len = vnd_len;
2975 memcpy(cmd->vnd_data, vnd_data, vnd_len);
2976
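	/* Configure both directions of the data path; per the HCI
	 * Configure_Data_Path command, 0x00 is input (host to controller)
	 * and 0x01 is output (controller to host).
	 */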
2977 cmd->direction = 0x00;
2978 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2979
2980 cmd->direction = 0x01;
2981 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2982
2983 err = hci_req_run(&req, config_data_path_complete);
2984error:
2985
2986 kfree(cmd);
2987 kfree(vnd_data);
2988 return err;
2989}
2990
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002991static int stop_discovery(struct hci_request *req, unsigned long opt)
2992{
2993 hci_dev_lock(req->hdev);
2994 hci_req_stop_discovery(req);
2995 hci_dev_unlock(req->hdev);
2996
2997 return 0;
2998}
2999
Johan Hedberge68f0722015-11-11 08:30:30 +02003000static void discov_update(struct work_struct *work)
3001{
3002 struct hci_dev *hdev = container_of(work, struct hci_dev,
3003 discov_update);
3004 u8 status = 0;
3005
3006 switch (hdev->discovery.state) {
3007 case DISCOVERY_STARTING:
3008 start_discovery(hdev, &status);
3009 mgmt_start_discovery_complete(hdev, status);
3010 if (status)
3011 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3012 else
3013 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3014 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003015 case DISCOVERY_STOPPING:
3016 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3017 mgmt_stop_discovery_complete(hdev, status);
3018 if (!status)
3019 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3020 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02003021 case DISCOVERY_STOPPED:
3022 default:
3023 return;
3024 }
3025}
3026
Johan Hedbergc366f552015-11-23 15:43:06 +02003027static void discov_off(struct work_struct *work)
3028{
3029 struct hci_dev *hdev = container_of(work, struct hci_dev,
3030 discov_off.work);
3031
Howard Chung22fbcfc2020-11-11 15:02:19 +08003032 bt_dev_dbg(hdev, "");
Johan Hedbergc366f552015-11-23 15:43:06 +02003033
3034 hci_dev_lock(hdev);
3035
3036	/* When the discoverable timeout triggers, just make sure the
3037	 * limited discoverable flag is cleared. Even if the timeout was
3038	 * triggered from general discoverable mode, it is safe to clear
3039	 * the flag unconditionally.
3040 */
3041 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3042 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3043 hdev->discov_timeout = 0;
3044
3045 hci_dev_unlock(hdev);
3046
3047 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3048 mgmt_new_settings(hdev);
3049}
3050
Johan Hedberg2ff13892015-11-25 16:15:44 +02003051static int powered_update_hci(struct hci_request *req, unsigned long opt)
3052{
3053 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02003054 u8 link_sec;
3055
3056 hci_dev_lock(hdev);
3057
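	/* Bring the controller's SSP, SC, LE host support, advertising,
	 * scan, class, name and EIR state in line with the current host
	 * settings after powering on.
	 */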
3058 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3059 !lmp_host_ssp_capable(hdev)) {
3060 u8 mode = 0x01;
3061
3062 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3063
3064 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3065 u8 support = 0x01;
3066
3067 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3068 sizeof(support), &support);
3069 }
3070 }
3071
3072 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3073 lmp_bredr_capable(hdev)) {
3074 struct hci_cp_write_le_host_supported cp;
3075
3076 cp.le = 0x01;
3077 cp.simul = 0x00;
3078
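		/* Simultaneous LE and BR/EDR to the same device is not
		 * used here (the host feature is deprecated in newer core
		 * specs), so simul stays disabled.
		 */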
3079 /* Check first if we already have the right
3080 * host state (host features set)
3081 */
3082 if (cp.le != lmp_host_le_capable(hdev) ||
3083 cp.simul != lmp_host_le_br_capable(hdev))
3084 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3085 sizeof(cp), &cp);
3086 }
3087
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003088 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02003089 /* Make sure the controller has a good default for
3090 * advertising data. This also applies to the case
3091 * where BR/EDR was toggled during the AUTO_OFF phase.
3092 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003093 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3094 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303095 int err;
3096
3097 if (ext_adv_capable(hdev)) {
3098 err = __hci_req_setup_ext_adv_instance(req,
3099 0x00);
3100 if (!err)
3101 __hci_req_update_scan_rsp_data(req,
3102 0x00);
3103 } else {
3104 err = 0;
3105 __hci_req_update_adv_data(req, 0x00);
3106 __hci_req_update_scan_rsp_data(req, 0x00);
3107 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003108
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303109 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303110 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303111 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303112 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03003113 __hci_req_enable_ext_advertising(req,
3114 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303115 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003116 } else if (!list_empty(&hdev->adv_instances)) {
3117 struct adv_info *adv_instance;
3118
Johan Hedberg2ff13892015-11-25 16:15:44 +02003119 adv_instance = list_first_entry(&hdev->adv_instances,
3120 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02003121 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003122 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02003123 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003124 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003125 }
3126
3127 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3128 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3129 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3130 sizeof(link_sec), &link_sec);
3131
3132 if (lmp_bredr_capable(hdev)) {
3133 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3134 __hci_req_write_fast_connectable(req, true);
3135 else
3136 __hci_req_write_fast_connectable(req, false);
3137 __hci_req_update_scan(req);
3138 __hci_req_update_class(req);
3139 __hci_req_update_name(req);
3140 __hci_req_update_eir(req);
3141 }
3142
3143 hci_dev_unlock(hdev);
3144 return 0;
3145}
3146
3147int __hci_req_hci_power_on(struct hci_dev *hdev)
3148{
3149 /* Register the available SMP channels (BR/EDR and LE) only when
3150 * successfully powering on the controller. This late
3151 * registration is required so that LE SMP can clearly decide if
3152 * the public address or static address is used.
3153 */
3154 smp_register(hdev);
3155
3156 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3157 NULL);
3158}
3159
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003160void hci_request_setup(struct hci_dev *hdev)
3161{
Johan Hedberge68f0722015-11-11 08:30:30 +02003162 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003163 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003164 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003165 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003166 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02003167 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003168 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3169 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02003170 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Howard Chungc4f1f402020-11-26 12:22:21 +08003171 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003172}
3173
3174void hci_request_cancel_all(struct hci_dev *hdev)
3175{
Johan Hedberg7df0f732015-11-12 15:15:00 +02003176 hci_req_sync_cancel(hdev, ENODEV);
3177
Johan Hedberge68f0722015-11-11 08:30:30 +02003178 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003179 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003180 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003181 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003182 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02003183 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003184 cancel_delayed_work_sync(&hdev->le_scan_disable);
3185 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02003186
3187 if (hdev->adv_instance_timeout) {
3188 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3189 hdev->adv_instance_timeout = 0;
3190 }
Howard Chungc4f1f402020-11-26 12:22:21 +08003191
3192 cancel_interleave_scan(hdev);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003193}