/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
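
/* Illustrative usage sketch (not part of the driver): a minimal, hedged
 * example of issuing a synchronous HCI command with __hci_cmd_sync(). The
 * opcode, timeout and response layout below are existing definitions from
 * net/bluetooth/hci.h; the wrapper function itself is hypothetical.
 *
 *	static int example_read_local_version(struct hci_dev *hdev)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *				     HCI_CMD_TIMEOUT);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		// skb->data carries struct hci_rp_read_local_version
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */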

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect HCI_UP
	 * against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
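
/* Illustrative usage sketch (not part of the driver): hci_req_sync() takes a
 * request-builder callback with the signature used above. The builder name
 * below is hypothetical; the opcode and constants are existing definitions.
 *
 *	static int example_write_scan(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	// Caller: builds the request under hci_req_sync_lock() and waits for
 *	// completion or timeout.
 *	err = hci_req_sync(hdev, example_write_scan, SCAN_PAGE,
 *			   HCI_CMD_TIMEOUT, NULL);
 */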

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

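/* Illustrative usage sketch (not part of the driver): the typical flow for an
 * asynchronous request built with the helpers above. The completion callback
 * name is hypothetical; its signature is hci_req_complete_t.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x", opcode, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_add(&req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 *	err = hci_req_run(&req, example_complete);	// splices onto hdev->cmd_q
 */
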
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleaved scan was started by this call, otherwise
 * return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to be
		 * scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Now that all accept list entries that are no longer valid have
	 * been removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return the filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

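/* Note (descriptive, not new behavior): the value returned above is used
 * directly as the LE scan filter_policy. An illustrative reading of the
 * contract, as consumed by hci_req_add_le_passive_scan() below:
 *
 *	filter_policy = update_accept_list(req);	// 0x00 = accept all,
 *							// 0x01 = use accept list
 *	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
 *			   own_addr_type, filter_policy, filter_dup,
 *			   addr_resolv);
 */
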
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution so that the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable the duplicates filter when scanning for an
		 * advertisement monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
		memset(&f, 0, sizeof(f));
		f.flt_type = HCI_FLT_CLEAR_ALL;
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
	}
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan && !scanning) {
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	} else if (!scan && scanning) {
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}

static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
						bool suspending)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		if (suspending)
			msft_suspend(hdev);
		else
			msft_resume(hdev);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && suspending)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;

		/* Disable page scan if enabled */
		if (test_bit(HCI_PSCAN, &hdev->flags)) {
			page_scan = SCAN_DISABLED;
			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
				    &page_scan);
			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		}

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Disable advertisement filters */
		hci_req_prepare_adv_monitor_suspend(&req, true);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		__hci_update_background_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		/* Clear any event filters and restore scan state */
		hci_req_clear_event_filter(&req);
		__hci_req_update_scan(&req);

		/* Reset passive/background scanning to normal */
		__hci_update_background_scan(&req);
		/* Enable all of the advertisement filters */
		hci_req_prepare_adv_monitor_suspend(&req, false);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001310static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1311{
1312 /* If there is no connection we are OK to advertise. */
1313 if (hci_conn_num(hdev, LE_LINK) == 0)
1314 return true;
1315
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001316 /* Check le_states if there is any connection in peripheral role. */
1317 if (hdev->conn_hash.le_num_peripheral > 0) {
1318 /* Peripheral connection state and non connectable mode bit 20.
1319 */
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001320 if (!connectable && !(hdev->le_states[2] & 0x10))
1321 return false;
1322
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001323 /* Peripheral connection state and connectable mode bit 38
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001324 * and scannable bit 21.
1325 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001326 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1327 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001328 return false;
1329 }
1330
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001331 /* Check le_states if there is any connection in central role. */
1332 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1333 /* Central connection state and non connectable mode bit 18. */
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001334 if (!connectable && !(hdev->le_states[2] & 0x02))
1335 return false;
1336
Archie Pusaka39bc74c2021-06-04 16:26:26 +08001337 /* Central connection state and connectable mode bit 35 and
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001338 * scannable 19.
1339 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001340 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001341 !(hdev->le_states[2] & 0x08)))
1342 return false;
1343 }
1344
1345 return true;
1346}
1347
Johan Hedbergf2252572015-11-18 12:49:20 +02001348void __hci_req_enable_advertising(struct hci_request *req)
1349{
1350 struct hci_dev *hdev = req->hdev;
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001351 struct adv_info *adv;
Johan Hedbergf2252572015-11-18 12:49:20 +02001352 struct hci_cp_le_set_adv_param cp;
1353 u8 own_addr_type, enable = 0x01;
1354 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301355 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001356 u32 flags;
1357
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001358 flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1359 adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001360
1361 /* If the "connectable" instance flag was not set, then choose between
1362 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1363 */
1364 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1365 mgmt_get_connectable(hdev);
1366
1367 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001368 return;
1369
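	/* Advertising must be disabled before its parameters and random
	 * address can be updated, so turn it off first if needed.
	 */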
1370 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1371 __hci_req_disable_advertising(req);
1372
1373 /* Clear the HCI_LE_ADV bit temporarily so that the
1374 * hci_update_random_address knows that it's safe to go ahead
1375 * and write a new random address. The flag will be set back on
1376 * as soon as the SET_ADV_ENABLE HCI command completes.
1377 */
1378 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1379
Johan Hedbergf2252572015-11-18 12:49:20 +02001380 /* Set require_privacy to true only when non-connectable
1381 * advertising is used. In that case it is fine to use a
1382 * non-resolvable private address.
1383 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001384 if (hci_update_random_address(req, !connectable,
1385 adv_use_rpa(hdev, flags),
1386 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001387 return;
1388
1389 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001390
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001391 if (adv) {
1392 adv_min_interval = adv->min_interval;
1393 adv_max_interval = adv->max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001394 } else {
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301395 adv_min_interval = hdev->le_adv_min_interval;
1396 adv_max_interval = hdev->le_adv_max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001397 }
1398
1399 if (connectable) {
1400 cp.type = LE_ADV_IND;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301401 } else {
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001402 if (adv_cur_instance_is_scannable(hdev))
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301403 cp.type = LE_ADV_SCAN_IND;
1404 else
1405 cp.type = LE_ADV_NONCONN_IND;
1406
1407 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1408 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1409 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1410 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301411 }
1412 }
1413
1414 cp.min_interval = cpu_to_le16(adv_min_interval);
1415 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001416 cp.own_address_type = own_addr_type;
1417 cp.channel_map = hdev->le_adv_channel_map;
1418
1419 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1420
1421 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1422}
1423
Johan Hedbergcab054a2015-11-30 11:21:45 +02001424void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001425{
1426 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001427 u8 len;
1428
1429 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1430 return;
1431
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301432 if (ext_adv_capable(hdev)) {
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001433 struct {
1434 struct hci_cp_le_set_ext_scan_rsp_data cp;
1435 u8 data[HCI_MAX_EXT_AD_LENGTH];
1436 } pdu;
Johan Hedbergf2252572015-11-18 12:49:20 +02001437
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001438 memset(&pdu, 0, sizeof(pdu));
Johan Hedbergf2252572015-11-18 12:49:20 +02001439
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001440 len = eir_create_scan_rsp(hdev, instance, pdu.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001441
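		/* There's nothing to do if the data hasn't changed */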
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301442 if (hdev->scan_rsp_data_len == len &&
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001443 !memcmp(pdu.data, hdev->scan_rsp_data, len))
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301444 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001445
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001446 memcpy(hdev->scan_rsp_data, pdu.data, len);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301447 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001448
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001449 pdu.cp.handle = instance;
1450 pdu.cp.length = len;
1451 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1452 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301453
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001454 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1455 sizeof(pdu.cp) + len, &pdu.cp);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301456 } else {
1457 struct hci_cp_le_set_scan_rsp_data cp;
1458
1459 memset(&cp, 0, sizeof(cp));
1460
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001461 len = eir_create_scan_rsp(hdev, instance, cp.data);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301462
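		/* There's nothing to do if the data hasn't changed */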
1463 if (hdev->scan_rsp_data_len == len &&
1464 !memcmp(cp.data, hdev->scan_rsp_data, len))
1465 return;
1466
1467 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1468 hdev->scan_rsp_data_len = len;
1469
1470 cp.length = len;
1471
1472 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1473 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001474}
1475
Johan Hedbergcab054a2015-11-30 11:21:45 +02001476void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001477{
1478 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001479 u8 len;
1480
1481 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1482 return;
1483
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301484 if (ext_adv_capable(hdev)) {
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001485 struct {
1486 struct hci_cp_le_set_ext_adv_data cp;
1487 u8 data[HCI_MAX_EXT_AD_LENGTH];
1488 } pdu;
Johan Hedbergf2252572015-11-18 12:49:20 +02001489
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001490 memset(&pdu, 0, sizeof(pdu));
Johan Hedbergf2252572015-11-18 12:49:20 +02001491
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001492 len = eir_create_adv_data(hdev, instance, pdu.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001493
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301494 /* There's nothing to do if the data hasn't changed */
1495 if (hdev->adv_data_len == len &&
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001496 memcmp(pdu.data, hdev->adv_data, len) == 0)
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301497 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001498
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001499 memcpy(hdev->adv_data, pdu.data, len);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301500 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001501
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001502 pdu.cp.length = len;
1503 pdu.cp.handle = instance;
1504 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1505 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301506
Luiz Augusto von Dentzc9ed0a72021-06-09 11:09:27 -07001507 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1508 sizeof(pdu.cp) + len, &pdu.cp);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301509 } else {
1510 struct hci_cp_le_set_adv_data cp;
1511
1512 memset(&cp, 0, sizeof(cp));
1513
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001514 len = eir_create_adv_data(hdev, instance, cp.data);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301515
1516 /* There's nothing to do if the data hasn't changed */
1517 if (hdev->adv_data_len == len &&
1518 memcmp(cp.data, hdev->adv_data, len) == 0)
1519 return;
1520
1521 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1522 hdev->adv_data_len = len;
1523
1524 cp.length = len;
1525
1526 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1527 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001528}
1529
Johan Hedbergcab054a2015-11-30 11:21:45 +02001530int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001531{
1532 struct hci_request req;
1533
1534 hci_req_init(&req, hdev);
1535 __hci_req_update_adv_data(&req, instance);
1536
1537 return hci_req_run(&req, NULL);
1538}
1539
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301540static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1541 u16 opcode)
1542{
1543 BT_DBG("%s status %u", hdev->name, status);
1544}
1545
1546void hci_req_disable_address_resolution(struct hci_dev *hdev)
1547{
1548 struct hci_request req;
1549 __u8 enable = 0x00;
1550
1551 if (!use_ll_privacy(hdev) &&
1552 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1553 return;
1554
1555 hci_req_init(&req, hdev);
1556
1557 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1558
1559 hci_req_run(&req, enable_addr_resolution_complete);
1560}
1561
Johan Hedbergf2252572015-11-18 12:49:20 +02001562static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1563{
Howard Chung22fbcfc2020-11-11 15:02:19 +08001564 bt_dev_dbg(hdev, "status %u", status);
Johan Hedbergf2252572015-11-18 12:49:20 +02001565}
1566
1567void hci_req_reenable_advertising(struct hci_dev *hdev)
1568{
1569 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001570
1571 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001572 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001573 return;
1574
Johan Hedbergf2252572015-11-18 12:49:20 +02001575 hci_req_init(&req, hdev);
1576
Johan Hedbergcab054a2015-11-30 11:21:45 +02001577 if (hdev->cur_adv_instance) {
1578 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1579 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001580 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301581 if (ext_adv_capable(hdev)) {
1582 __hci_req_start_ext_adv(&req, 0x00);
1583 } else {
1584 __hci_req_update_adv_data(&req, 0x00);
1585 __hci_req_update_scan_rsp_data(&req, 0x00);
1586 __hci_req_enable_advertising(&req);
1587 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001588 }
1589
1590 hci_req_run(&req, adv_enable_complete);
1591}
1592
1593static void adv_timeout_expire(struct work_struct *work)
1594{
1595 struct hci_dev *hdev = container_of(work, struct hci_dev,
1596 adv_instance_expire.work);
1597
1598 struct hci_request req;
1599 u8 instance;
1600
Howard Chung22fbcfc2020-11-11 15:02:19 +08001601 bt_dev_dbg(hdev, "");
Johan Hedbergf2252572015-11-18 12:49:20 +02001602
1603 hci_dev_lock(hdev);
1604
1605 hdev->adv_instance_timeout = 0;
1606
Johan Hedbergcab054a2015-11-30 11:21:45 +02001607 instance = hdev->cur_adv_instance;
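	/* The default instance (0x00) is never scheduled with a timeout */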
Johan Hedbergf2252572015-11-18 12:49:20 +02001608 if (instance == 0x00)
1609 goto unlock;
1610
1611 hci_req_init(&req, hdev);
1612
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001613 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001614
1615 if (list_empty(&hdev->adv_instances))
1616 __hci_req_disable_advertising(&req);
1617
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001618 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001619
1620unlock:
1621 hci_dev_unlock(hdev);
1622}
1623
Howard Chungc4f1f402020-11-26 12:22:21 +08001624static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1625 unsigned long opt)
1626{
1627 struct hci_dev *hdev = req->hdev;
1628 int ret = 0;
1629
1630 hci_dev_lock(hdev);
1631
1632 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1633 hci_req_add_le_scan_disable(req, false);
1634 hci_req_add_le_passive_scan(req);
1635
1636 switch (hdev->interleave_scan_state) {
1637 case INTERLEAVE_SCAN_ALLOWLIST:
1638 bt_dev_dbg(hdev, "next state: allowlist");
1639 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1640 break;
1641 case INTERLEAVE_SCAN_NO_FILTER:
1642 bt_dev_dbg(hdev, "next state: no filter");
1643 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1644 break;
1645 case INTERLEAVE_SCAN_NONE:
1646 BT_ERR("unexpected error");
1647 ret = -1;
1648 }
1649
1650 hci_dev_unlock(hdev);
1651
1652 return ret;
1653}
1654
1655static void interleave_scan_work(struct work_struct *work)
1656{
1657 struct hci_dev *hdev = container_of(work, struct hci_dev,
1658 interleave_scan.work);
1659 u8 status;
1660 unsigned long timeout;
1661
1662 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1663 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1664 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1665 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1666 } else {
1667 bt_dev_err(hdev, "unexpected error");
1668 return;
1669 }
1670
1671 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1672 HCI_CMD_TIMEOUT, &status);
1673
1674 /* Don't continue interleaving if it was canceled */
1675 if (is_interleave_scanning(hdev))
1676 queue_delayed_work(hdev->req_workqueue,
1677 &hdev->interleave_scan, timeout);
1678}
1679
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301680int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1681 bool use_rpa, struct adv_info *adv_instance,
1682 u8 *own_addr_type, bdaddr_t *rand_addr)
1683{
1684 int err;
1685
1686 bacpy(rand_addr, BDADDR_ANY);
1687
1688 /* If privacy is enabled use a resolvable private address. If
1689 * current RPA has expired then generate a new one.
1690 */
1691 if (use_rpa) {
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301692		/* If the controller supports LL Privacy, use own address type
1693		 * 0x03.
1694		 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05301695 if (use_ll_privacy(hdev) &&
1696 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301697 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1698 else
1699 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301700
1701 if (adv_instance) {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001702 if (adv_rpa_valid(adv_instance))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301703 return 0;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301704 } else {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001705 if (rpa_valid(hdev))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301706 return 0;
1707 }
1708
1709 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1710 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001711 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301712 return err;
1713 }
1714
1715 bacpy(rand_addr, &hdev->rpa);
1716
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301717 return 0;
1718 }
1719
1720 /* In case of required privacy without resolvable private address,
1721	 * use a non-resolvable private address. This is useful for
1722 * non-connectable advertising.
1723 */
1724 if (require_privacy) {
1725 bdaddr_t nrpa;
1726
1727 while (true) {
1728 /* The non-resolvable private address is generated
1729		 * from six random bytes with the two most significant
1730 * bits cleared.
1731 */
1732 get_random_bytes(&nrpa, 6);
1733 nrpa.b[5] &= 0x3f;
1734
1735 /* The non-resolvable private address shall not be
1736 * equal to the public address.
1737 */
1738 if (bacmp(&hdev->bdaddr, &nrpa))
1739 break;
1740 }
1741
1742 *own_addr_type = ADDR_LE_DEV_RANDOM;
1743 bacpy(rand_addr, &nrpa);
1744
1745 return 0;
1746 }
1747
1748 /* No privacy so use a public address. */
1749 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1750
1751 return 0;
1752}
1753
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301754void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1755{
1756 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1757}
1758
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001759static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1760{
1761 struct hci_dev *hdev = req->hdev;
1762
1763 /* If we're advertising or initiating an LE connection we can't
1764 * go ahead and change the random address at this time. This is
1765 * because the eventual initiator address used for the
1766 * subsequently created connection will be undefined (some
1767 * controllers use the new address and others the one we had
1768 * when the operation started).
1769 *
1770 * In this kind of scenario skip the update and let the random
1771 * address be updated at the next cycle.
1772 */
1773 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1774 hci_lookup_le_connect(hdev)) {
1775 bt_dev_dbg(hdev, "Deferring random address update");
1776 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1777 return;
1778 }
1779
1780 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1781}
1782
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301783int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301784{
1785 struct hci_cp_le_set_ext_adv_params cp;
1786 struct hci_dev *hdev = req->hdev;
1787 bool connectable;
1788 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301789 bdaddr_t random_addr;
1790 u8 own_addr_type;
1791 int err;
1792 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301793 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301794
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301795 if (instance > 0) {
1796 adv_instance = hci_find_adv_instance(hdev, instance);
1797 if (!adv_instance)
1798 return -EINVAL;
1799 } else {
1800 adv_instance = NULL;
1801 }
1802
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001803 flags = hci_adv_instance_flags(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301804
1805 /* If the "connectable" instance flag was not set, then choose between
1806 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1807 */
1808 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1809 mgmt_get_connectable(hdev);
1810
Colin Ian King75edd1f2018-11-09 13:27:36 +00001811 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301812 return -EPERM;
1813
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301814 /* Set require_privacy to true only when non-connectable
1815 * advertising is used. In that case it is fine to use a
1816 * non-resolvable private address.
1817 */
1818 err = hci_get_random_address(hdev, !connectable,
1819 adv_use_rpa(hdev, flags), adv_instance,
1820 &own_addr_type, &random_addr);
1821 if (err < 0)
1822 return err;
1823
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301824 memset(&cp, 0, sizeof(cp));
1825
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001826 if (adv_instance) {
1827 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1828 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1829 cp.tx_power = adv_instance->tx_power;
1830 } else {
1831 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1832 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1833 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1834 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301835
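	/* Any secondary PHY flag requests extended (non-legacy) advertising PDUs */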
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301836 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1837
1838 if (connectable) {
1839 if (secondary_adv)
1840 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1841 else
1842 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001843 } else if (hci_adv_instance_is_scannable(hdev, instance) ||
Daniel Winklerff02db12021-03-03 11:15:23 -08001844 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301845 if (secondary_adv)
1846 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1847 else
1848 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1849 } else {
1850 if (secondary_adv)
1851 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1852 else
1853 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1854 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301855
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301856 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301857 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001858 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301859
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301860 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1861 cp.primary_phy = HCI_ADV_PHY_1M;
1862 cp.secondary_phy = HCI_ADV_PHY_2M;
1863 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1864 cp.primary_phy = HCI_ADV_PHY_CODED;
1865 cp.secondary_phy = HCI_ADV_PHY_CODED;
1866 } else {
1867 /* In all other cases use 1M */
1868 cp.primary_phy = HCI_ADV_PHY_1M;
1869 cp.secondary_phy = HCI_ADV_PHY_1M;
1870 }
1871
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301872 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1873
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301874 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1875 bacmp(&random_addr, BDADDR_ANY)) {
1876 struct hci_cp_le_set_adv_set_rand_addr cp;
1877
1878 /* Check if random address need to be updated */
1879 if (adv_instance) {
1880 if (!bacmp(&random_addr, &adv_instance->random_addr))
1881 return 0;
1882 } else {
1883 if (!bacmp(&random_addr, &hdev->random_addr))
1884 return 0;
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001885			/* Instance 0x00 doesn't have an adv_info; instead it
1886			 * uses hdev->random_addr to track its address, so
1887			 * whenever that needs to be updated this also sets the
1888			 * random address, since hdev->random_addr is shared with
1889			 * the scan state machine.
1890 */
1891 set_random_addr(req, &random_addr);
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301892 }
1893
1894 memset(&cp, 0, sizeof(cp));
1895
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001896 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301897 bacpy(&cp.bdaddr, &random_addr);
1898
1899 hci_req_add(req,
1900 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1901 sizeof(cp), &cp);
1902 }
1903
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301904 return 0;
1905}
1906
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001907int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301908{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001909 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301910 struct hci_cp_le_set_ext_adv_enable *cp;
1911 struct hci_cp_ext_adv_set *adv_set;
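	/* Room for the enable command followed by a single advertising set entry */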
1912 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001913 struct adv_info *adv_instance;
1914
1915 if (instance > 0) {
1916 adv_instance = hci_find_adv_instance(hdev, instance);
1917 if (!adv_instance)
1918 return -EINVAL;
1919 } else {
1920 adv_instance = NULL;
1921 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301922
1923 cp = (void *) data;
1924 adv_set = (void *) cp->data;
1925
1926 memset(cp, 0, sizeof(*cp));
1927
1928 cp->enable = 0x01;
1929 cp->num_of_sets = 0x01;
1930
1931 memset(adv_set, 0, sizeof(*adv_set));
1932
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001933 adv_set->handle = instance;
1934
1935 /* Set duration per instance since controller is responsible for
1936 * scheduling it.
1937 */
1938 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001939 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001940
1941 /* Time = N * 10 ms */
1942 adv_set->duration = cpu_to_le16(duration / 10);
1943 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301944
1945 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1946 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1947 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001948
1949 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301950}
1951
Daniel Winkler37adf702020-07-14 14:16:00 -07001952int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1953{
1954 struct hci_dev *hdev = req->hdev;
1955 struct hci_cp_le_set_ext_adv_enable *cp;
1956 struct hci_cp_ext_adv_set *adv_set;
1957 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1958 u8 req_size;
1959
1960 /* If request specifies an instance that doesn't exist, fail */
1961 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1962 return -EINVAL;
1963
1964 memset(data, 0, sizeof(data));
1965
1966 cp = (void *)data;
1967 adv_set = (void *)cp->data;
1968
1969 /* Instance 0x00 indicates all advertising instances will be disabled */
1970 cp->num_of_sets = !!instance;
1971 cp->enable = 0x00;
1972
1973 adv_set->handle = instance;
1974
1975 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1976 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1977
1978 return 0;
1979}
1980
1981int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1982{
1983 struct hci_dev *hdev = req->hdev;
1984
1985 /* If request specifies an instance that doesn't exist, fail */
1986 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1987 return -EINVAL;
1988
1989 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1990
1991 return 0;
1992}
1993
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301994int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1995{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301996 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07001997 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301998 int err;
1999
Daniel Winkler37adf702020-07-14 14:16:00 -07002000 /* If instance isn't pending, the chip knows about it, and it's safe to
2001	 * disable it.
2002 */
2003 if (adv_instance && !adv_instance->pending)
2004 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302005
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302006 err = __hci_req_setup_ext_adv_instance(req, instance);
2007 if (err < 0)
2008 return err;
2009
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302010 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002011 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302012
2013 return 0;
2014}
2015
Johan Hedbergf2252572015-11-18 12:49:20 +02002016int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2017 bool force)
2018{
2019 struct hci_dev *hdev = req->hdev;
2020 struct adv_info *adv_instance = NULL;
2021 u16 timeout;
2022
2023 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002024 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002025 return -EPERM;
2026
2027 if (hdev->adv_instance_timeout)
2028 return -EBUSY;
2029
2030 adv_instance = hci_find_adv_instance(hdev, instance);
2031 if (!adv_instance)
2032 return -ENOENT;
2033
2034 /* A zero timeout means unlimited advertising. As long as there is
2035 * only one instance, duration should be ignored. We still set a timeout
2036 * in case further instances are being added later on.
2037 *
2038 * If the remaining lifetime of the instance is more than the duration
2039 * then the timeout corresponds to the duration, otherwise it will be
2040 * reduced to the remaining instance lifetime.
2041 */
2042 if (adv_instance->timeout == 0 ||
2043 adv_instance->duration <= adv_instance->remaining_time)
2044 timeout = adv_instance->duration;
2045 else
2046 timeout = adv_instance->remaining_time;
2047
2048 /* The remaining time is being reduced unless the instance is being
2049 * advertised without time limit.
2050 */
2051 if (adv_instance->timeout)
2052 adv_instance->remaining_time =
2053 adv_instance->remaining_time - timeout;
2054
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002055 /* Only use work for scheduling instances with legacy advertising */
2056 if (!ext_adv_capable(hdev)) {
2057 hdev->adv_instance_timeout = timeout;
2058 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002059 &hdev->adv_instance_expire,
2060 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002061 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002062
2063 /* If we're just re-scheduling the same instance again then do not
2064 * execute any HCI commands. This happens when a single instance is
2065 * being advertised.
2066 */
2067 if (!force && hdev->cur_adv_instance == instance &&
2068 hci_dev_test_flag(hdev, HCI_LE_ADV))
2069 return 0;
2070
2071 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302072 if (ext_adv_capable(hdev)) {
2073 __hci_req_start_ext_adv(req, instance);
2074 } else {
2075 __hci_req_update_adv_data(req, instance);
2076 __hci_req_update_scan_rsp_data(req, instance);
2077 __hci_req_enable_advertising(req);
2078 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002079
2080 return 0;
2081}
2082
Johan Hedbergf2252572015-11-18 12:49:20 +02002083/* For a single instance:
2084 * - force == true: The instance will be removed even when its remaining
2085 * lifetime is not zero.
2086 * - force == false: the instance will be deactivated but kept stored unless
2087 * the remaining lifetime is zero.
2088 *
2089 * For instance == 0x00:
2090 * - force == true: All instances will be removed regardless of their timeout
2091 * setting.
2092 * - force == false: Only instances that have a timeout will be removed.
2093 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002094void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2095 struct hci_request *req, u8 instance,
2096 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002097{
2098 struct adv_info *adv_instance, *n, *next_instance = NULL;
2099 int err;
2100 u8 rem_inst;
2101
2102 /* Cancel any timeout concerning the removed instance(s). */
2103 if (!instance || hdev->cur_adv_instance == instance)
2104 cancel_adv_timeout(hdev);
2105
2106 /* Get the next instance to advertise BEFORE we remove
2107 * the current one. This can be the same instance again
2108 * if there is only one instance.
2109 */
2110 if (instance && hdev->cur_adv_instance == instance)
2111 next_instance = hci_get_next_instance(hdev, instance);
2112
2113 if (instance == 0x00) {
2114 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2115 list) {
2116 if (!(force || adv_instance->timeout))
2117 continue;
2118
2119 rem_inst = adv_instance->instance;
2120 err = hci_remove_adv_instance(hdev, rem_inst);
2121 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002122 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002123 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002124 } else {
2125 adv_instance = hci_find_adv_instance(hdev, instance);
2126
2127 if (force || (adv_instance && adv_instance->timeout &&
2128 !adv_instance->remaining_time)) {
2129 /* Don't advertise a removed instance. */
2130 if (next_instance &&
2131 next_instance->instance == instance)
2132 next_instance = NULL;
2133
2134 err = hci_remove_adv_instance(hdev, instance);
2135 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002136 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002137 }
2138 }
2139
Johan Hedbergf2252572015-11-18 12:49:20 +02002140 if (!req || !hdev_is_powered(hdev) ||
2141 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2142 return;
2143
Daniel Winkler37adf702020-07-14 14:16:00 -07002144 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002145 __hci_req_schedule_adv_instance(req, next_instance->instance,
2146 false);
2147}
2148
Johan Hedberg0857dd32014-12-19 13:40:20 +02002149int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002150 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002151{
2152 struct hci_dev *hdev = req->hdev;
2153 int err;
2154
2155 /* If privacy is enabled use a resolvable private address. If
2156 * current RPA has expired or there is something else than
2157 * the current RPA in use, then generate a new one.
2158 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002159 if (use_rpa) {
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302160		/* If the controller supports LL Privacy, use own address type
2161		 * 0x03.
2162		 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05302163 if (use_ll_privacy(hdev) &&
2164 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302165 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2166 else
2167 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002168
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07002169 if (rpa_valid(hdev))
Johan Hedberg0857dd32014-12-19 13:40:20 +02002170 return 0;
2171
2172 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2173 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002174 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002175 return err;
2176 }
2177
2178 set_random_addr(req, &hdev->rpa);
2179
Johan Hedberg0857dd32014-12-19 13:40:20 +02002180 return 0;
2181 }
2182
2183 /* In case of required privacy without resolvable private address,
2184	 * use a non-resolvable private address. This is useful for active
2185 * scanning and non-connectable advertising.
2186 */
2187 if (require_privacy) {
2188 bdaddr_t nrpa;
2189
2190 while (true) {
2191 /* The non-resolvable private address is generated
2192			 * from six random bytes with the two most significant
2193 * bits cleared.
2194 */
2195 get_random_bytes(&nrpa, 6);
2196 nrpa.b[5] &= 0x3f;
2197
2198 /* The non-resolvable private address shall not be
2199 * equal to the public address.
2200 */
2201 if (bacmp(&hdev->bdaddr, &nrpa))
2202 break;
2203 }
2204
2205 *own_addr_type = ADDR_LE_DEV_RANDOM;
2206 set_random_addr(req, &nrpa);
2207 return 0;
2208 }
2209
2210 /* If forcing static address is in use or there is no public
2211 * address use the static address as random address (but skip
2212 * the HCI command if the current random address is already the
2213	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002214 *
2215 * In case BR/EDR has been disabled on a dual-mode controller
2216 * and a static address has been configured, then use that
2217 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002218 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002219 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002220 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002221 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002222 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002223 *own_addr_type = ADDR_LE_DEV_RANDOM;
2224 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2225 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2226 &hdev->static_addr);
2227 return 0;
2228 }
2229
2230 /* Neither privacy nor static address is being used so use a
2231 * public address.
2232 */
2233 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2234
2235 return 0;
2236}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002237
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002238static bool disconnected_accept_list_entries(struct hci_dev *hdev)
Johan Hedberg405a2612014-12-19 23:18:22 +02002239{
2240 struct bdaddr_list *b;
2241
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002242 list_for_each_entry(b, &hdev->accept_list, list) {
Johan Hedberg405a2612014-12-19 23:18:22 +02002243 struct hci_conn *conn;
2244
2245 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2246 if (!conn)
2247 return true;
2248
2249 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2250 return true;
2251 }
2252
2253 return false;
2254}
2255
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002256void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002257{
2258 struct hci_dev *hdev = req->hdev;
2259 u8 scan;
2260
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002261 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002262 return;
2263
2264 if (!hdev_is_powered(hdev))
2265 return;
2266
2267 if (mgmt_powering_down(hdev))
2268 return;
2269
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002270 if (hdev->scanning_paused)
2271 return;
2272
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002273 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002274 disconnected_accept_list_entries(hdev))
Johan Hedberg405a2612014-12-19 23:18:22 +02002275 scan = SCAN_PAGE;
2276 else
2277 scan = SCAN_DISABLED;
2278
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002279 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002280 scan |= SCAN_INQUIRY;
2281
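	/* Don't issue the command if the scan state already matches */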
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002282 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2283 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2284 return;
2285
Johan Hedberg405a2612014-12-19 23:18:22 +02002286 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2287}
2288
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002289static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002290{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002291 hci_dev_lock(req->hdev);
2292 __hci_req_update_scan(req);
2293 hci_dev_unlock(req->hdev);
2294 return 0;
2295}
Johan Hedberg405a2612014-12-19 23:18:22 +02002296
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002297static void scan_update_work(struct work_struct *work)
2298{
2299 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2300
2301 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002302}
2303
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002304static int connectable_update(struct hci_request *req, unsigned long opt)
2305{
2306 struct hci_dev *hdev = req->hdev;
2307
2308 hci_dev_lock(hdev);
2309
2310 __hci_req_update_scan(req);
2311
2312 /* If BR/EDR is not enabled and we disable advertising as a
2313 * by-product of disabling connectable, we need to update the
2314 * advertising flags.
2315 */
2316 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002317 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002318
2319 /* Update the advertising parameters if necessary */
2320 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302321 !list_empty(&hdev->adv_instances)) {
2322 if (ext_adv_capable(hdev))
2323 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2324 else
2325 __hci_req_enable_advertising(req);
2326 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002327
2328 __hci_update_background_scan(req);
2329
2330 hci_dev_unlock(hdev);
2331
2332 return 0;
2333}
2334
2335static void connectable_update_work(struct work_struct *work)
2336{
2337 struct hci_dev *hdev = container_of(work, struct hci_dev,
2338 connectable_update);
2339 u8 status;
2340
2341 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2342 mgmt_set_connectable_complete(hdev, status);
2343}
2344
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002345static u8 get_service_classes(struct hci_dev *hdev)
2346{
2347 struct bt_uuid *uuid;
2348 u8 val = 0;
2349
2350 list_for_each_entry(uuid, &hdev->uuids, list)
2351 val |= uuid->svc_hint;
2352
2353 return val;
2354}
2355
2356void __hci_req_update_class(struct hci_request *req)
2357{
2358 struct hci_dev *hdev = req->hdev;
2359 u8 cod[3];
2360
Howard Chung22fbcfc2020-11-11 15:02:19 +08002361 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002362
2363 if (!hdev_is_powered(hdev))
2364 return;
2365
2366 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2367 return;
2368
2369 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2370 return;
2371
2372 cod[0] = hdev->minor_class;
2373 cod[1] = hdev->major_class;
2374 cod[2] = get_service_classes(hdev);
2375
2376 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2377 cod[1] |= 0x20;
2378
2379 if (memcmp(cod, hdev->dev_class, 3) == 0)
2380 return;
2381
2382 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2383}
2384
Johan Hedbergaed1a882015-11-22 17:24:44 +03002385static void write_iac(struct hci_request *req)
2386{
2387 struct hci_dev *hdev = req->hdev;
2388 struct hci_cp_write_current_iac_lap cp;
2389
2390 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2391 return;
2392
2393 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2394 /* Limited discoverable mode */
2395 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2396 cp.iac_lap[0] = 0x00; /* LIAC */
2397 cp.iac_lap[1] = 0x8b;
2398 cp.iac_lap[2] = 0x9e;
2399 cp.iac_lap[3] = 0x33; /* GIAC */
2400 cp.iac_lap[4] = 0x8b;
2401 cp.iac_lap[5] = 0x9e;
2402 } else {
2403 /* General discoverable mode */
2404 cp.num_iac = 1;
2405 cp.iac_lap[0] = 0x33; /* GIAC */
2406 cp.iac_lap[1] = 0x8b;
2407 cp.iac_lap[2] = 0x9e;
2408 }
2409
2410 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2411 (cp.num_iac * 3) + 1, &cp);
2412}
2413
2414static int discoverable_update(struct hci_request *req, unsigned long opt)
2415{
2416 struct hci_dev *hdev = req->hdev;
2417
2418 hci_dev_lock(hdev);
2419
2420 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2421 write_iac(req);
2422 __hci_req_update_scan(req);
2423 __hci_req_update_class(req);
2424 }
2425
2426 /* Advertising instances don't use the global discoverable setting, so
2427 * only update AD if advertising was enabled using Set Advertising.
2428 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002429 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002430 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002431
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002432 /* Discoverable mode affects the local advertising
2433 * address in limited privacy mode.
2434 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302435 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2436 if (ext_adv_capable(hdev))
2437 __hci_req_start_ext_adv(req, 0x00);
2438 else
2439 __hci_req_enable_advertising(req);
2440 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002441 }
2442
Johan Hedbergaed1a882015-11-22 17:24:44 +03002443 hci_dev_unlock(hdev);
2444
2445 return 0;
2446}
2447
2448static void discoverable_update_work(struct work_struct *work)
2449{
2450 struct hci_dev *hdev = container_of(work, struct hci_dev,
2451 discoverable_update);
2452 u8 status;
2453
2454 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2455 mgmt_set_discoverable_complete(hdev, status);
2456}
2457
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002458void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2459 u8 reason)
2460{
2461 switch (conn->state) {
2462 case BT_CONNECTED:
2463 case BT_CONFIG:
2464 if (conn->type == AMP_LINK) {
2465 struct hci_cp_disconn_phy_link cp;
2466
2467 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2468 cp.reason = reason;
2469 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2470 &cp);
2471 } else {
2472 struct hci_cp_disconnect dc;
2473
2474 dc.handle = cpu_to_le16(conn->handle);
2475 dc.reason = reason;
2476 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2477 }
2478
2479 conn->state = BT_DISCONN;
2480
2481 break;
2482 case BT_CONNECT:
2483 if (conn->type == LE_LINK) {
2484 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2485 break;
2486 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2487 0, NULL);
2488 } else if (conn->type == ACL_LINK) {
2489 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2490 break;
2491 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2492 6, &conn->dst);
2493 }
2494 break;
2495 case BT_CONNECT2:
2496 if (conn->type == ACL_LINK) {
2497 struct hci_cp_reject_conn_req rej;
2498
2499 bacpy(&rej.bdaddr, &conn->dst);
2500 rej.reason = reason;
2501
2502 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2503 sizeof(rej), &rej);
2504 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2505 struct hci_cp_reject_sync_conn_req rej;
2506
2507 bacpy(&rej.bdaddr, &conn->dst);
2508
2509 /* SCO rejection has its own limited set of
2510 * allowed error values (0x0D-0x0F) which isn't
2511 * compatible with most values passed to this
2512 * function. To be safe hard-code one of the
2513 * values that's suitable for SCO.
2514 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002515 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002516
2517 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2518 sizeof(rej), &rej);
2519 }
2520 break;
2521 default:
2522 conn->state = BT_CLOSED;
2523 break;
2524 }
2525}
2526
2527static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2528{
2529 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002530 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002531}
2532
2533int hci_abort_conn(struct hci_conn *conn, u8 reason)
2534{
2535 struct hci_request req;
2536 int err;
2537
2538 hci_req_init(&req, conn->hdev);
2539
2540 __hci_abort_conn(&req, conn, reason);
2541
2542 err = hci_req_run(&req, abort_conn_complete);
2543 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002544 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002545 return err;
2546 }
2547
2548 return 0;
2549}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002550
Johan Hedberga1d01db2015-11-11 08:11:25 +02002551static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002552{
2553 hci_dev_lock(req->hdev);
2554 __hci_update_background_scan(req);
2555 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002556 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002557}
2558
2559static void bg_scan_update(struct work_struct *work)
2560{
2561 struct hci_dev *hdev = container_of(work, struct hci_dev,
2562 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002563 struct hci_conn *conn;
2564 u8 status;
2565 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002566
Johan Hedberg84235d22015-11-11 08:11:20 +02002567 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2568 if (!err)
2569 return;
2570
2571 hci_dev_lock(hdev);
2572
2573 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2574 if (conn)
2575 hci_le_conn_failed(conn, status);
2576
2577 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002578}
2579
Johan Hedberga1d01db2015-11-11 08:11:25 +02002580static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002581{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302582 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002583 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002584}
2585
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002586static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2587{
2588 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002589 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2590 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002591 struct hci_cp_inquiry cp;
2592
Archie Pusaka06752d12021-04-01 11:11:33 +08002593 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2594 return 0;
2595
Howard Chung22fbcfc2020-11-11 15:02:19 +08002596 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002597
2598 hci_dev_lock(req->hdev);
2599 hci_inquiry_cache_flush(req->hdev);
2600 hci_dev_unlock(req->hdev);
2601
2602 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002603
2604 if (req->hdev->discovery.limited)
2605 memcpy(&cp.lap, liac, sizeof(cp.lap));
2606 else
2607 memcpy(&cp.lap, giac, sizeof(cp.lap));
2608
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002609 cp.length = length;
2610
2611 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2612
2613 return 0;
2614}
2615
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002616static void le_scan_disable_work(struct work_struct *work)
2617{
2618 struct hci_dev *hdev = container_of(work, struct hci_dev,
2619 le_scan_disable.work);
2620 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002621
Howard Chung22fbcfc2020-11-11 15:02:19 +08002622 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002623
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002624 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002625 return;
2626
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002627 cancel_delayed_work(&hdev->le_scan_restart);
2628
2629 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2630 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002631 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2632 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002633 return;
2634 }
2635
2636 hdev->discovery.scan_start = 0;
2637
2638 /* If we were running LE only scan, change discovery state. If
2639 * we were running both LE and BR/EDR inquiry simultaneously,
2640 * and BR/EDR inquiry is already finished, stop discovery,
2641 * otherwise BR/EDR inquiry will stop discovery when finished.
2642 * If we will resolve remote device name, do not change
2643 * discovery state.
2644 */
2645
2646 if (hdev->discovery.type == DISCOV_TYPE_LE)
2647 goto discov_stopped;
2648
2649 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2650 return;
2651
2652 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2653 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2654 hdev->discovery.state != DISCOVERY_RESOLVING)
2655 goto discov_stopped;
2656
2657 return;
2658 }
2659
2660 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2661 HCI_CMD_TIMEOUT, &status);
2662 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002663 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002664 goto discov_stopped;
2665 }
2666
2667 return;
2668
2669discov_stopped:
2670 hci_dev_lock(hdev);
2671 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2672 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002673}
2674
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002675static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002676{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002677 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002678
2679 /* If controller is not scanning we are done. */
2680 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2681 return 0;
2682
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002683 if (hdev->scanning_paused) {
2684 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2685 return 0;
2686 }
2687
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302688 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002689
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302690 if (use_ext_scan(hdev)) {
2691 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2692
2693 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2694 ext_enable_cp.enable = LE_SCAN_ENABLE;
2695 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2696
2697 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2698 sizeof(ext_enable_cp), &ext_enable_cp);
2699 } else {
2700 struct hci_cp_le_set_scan_enable cp;
2701
2702 memset(&cp, 0, sizeof(cp));
2703 cp.enable = LE_SCAN_ENABLE;
2704 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2705 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2706 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002707
2708 return 0;
2709}
2710
2711static void le_scan_restart_work(struct work_struct *work)
2712{
2713 struct hci_dev *hdev = container_of(work, struct hci_dev,
2714 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002715 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002716 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002717
Howard Chung22fbcfc2020-11-11 15:02:19 +08002718 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002719
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002720 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002721 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002722 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2723 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002724 return;
2725 }
2726
2727 hci_dev_lock(hdev);
2728
2729 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2730 !hdev->discovery.scan_start)
2731 goto unlock;
2732
2733 /* When the scan was started, hdev->le_scan_disable has been queued
2734 * after duration from scan_start. During scan restart this job
2735	 * has been canceled, and we need to queue it again with the proper
2736 * timeout, to make sure that scan does not run indefinitely.
2737 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

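/* Request builder for the active LE scan used during discovery. The opt
 * argument carries the scan interval; the scan window is always
 * hdev->le_scan_window_discovery.
 */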
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(req, false);
		cancel_interleave_scan(hdev);
	}

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev)) {
		/* The duplicate filter should be disabled when an
		 * advertisement monitor is activated; otherwise the monitor
		 * receives only a single advertisement per peer during active
		 * scanning and may wrongly report those peers as lost.
		 *
		 * Note that controllers differ in what they consider a
		 * duplicate: some match on address alone, others on address
		 * plus RSSI. In the latter case disabling the filter is not
		 * strictly needed, but since active scanning typically runs
		 * only for a short period, the extra power impact should be
		 * negligible.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, filter_dup, addr_resolv);
	return 0;
}

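/* Queue an active LE scan and a BR/EDR inquiry in a single request, for
 * controllers that can run both at the same time
 * (HCI_QUIRK_SIMULTANEOUS_DISCOVERY).
 */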
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	bt_dev_dbg(req->hdev, "");

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

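/* Start discovery according to hdev->discovery.type and, where LE
 * scanning is involved, schedule le_scan_disable so the scan stops when
 * the discovery timeout expires. *status carries the HCI status of the
 * request.
 */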
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

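/* Append the commands needed to stop the current discovery activity
 * (inquiry cancel, LE scan disable and/or remote name request cancel)
 * to the given request. Returns true if at least one command was queued.
 */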
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			cancel_delayed_work(&hdev->le_scan_restart);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static void config_data_path_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

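/* Configure the controller data path for an offloaded codec by sending
 * HCI_CONFIGURE_DATA_PATH once per direction (0x00 and 0x01), using the
 * data path id and vendor-specific configuration supplied by the driver
 * callbacks.
 *
 * Minimal usage sketch (illustrative only; the codec value and the
 * presence of the driver callbacks are assumptions, not requirements
 * taken from this file):
 *
 *	struct bt_codec codec = { .id = 0x05 };
 *
 *	if (hdev->get_codec_config_data && hdev->get_data_path_id)
 *		hci_req_configure_datapath(hdev, &codec);
 */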
int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
{
	struct hci_request req;
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	hci_req_init(&req, hdev);

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

	cmd->direction = 0x01;
	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

	err = hci_req_run(&req, config_data_path_complete);
error:
	kfree(cmd);
	kfree(vnd_data);
	return err;
}

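/* Request callback used with hci_req_sync() from discov_update() to
 * stop an ongoing discovery.
 */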
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

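/* Work item behind mgmt Start/Stop Discovery: run the synchronous
 * request matching the current discovery state and report the result
 * back to the management interface.
 */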
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

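/* Delayed work handling expiry of the discoverable timeout: clear the
 * discoverable flags and push the new setting to the controller.
 */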
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

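/* Synchronous request run right after the controller is powered on to
 * bring SSP, LE host support, advertising, scan mode, class, name and
 * EIR in line with the current host settings.
 */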
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

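/* Power-on hook: register SMP and apply the powered state configuration
 * through powered_update_hci().
 */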
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

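/* Initialize the work items and delayed work owned by the request
 * layer; counterpart of hci_request_cancel_all() below.
 */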
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

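/* Cancel the pending synchronous request, if any, and flush all
 * request-related work before the controller goes down.
 */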
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	cancel_interleave_scan(hdev);
}