/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
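
/* Illustrative usage sketch (not taken from this file): a caller that
 * already holds the request lock can issue a single command synchronously
 * and parse the returned event skb. HCI_OP_READ_LOCAL_VERSION and
 * HCI_CMD_TIMEOUT are existing definitions from hci.h; the surrounding
 * function is hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data then holds the command complete parameters, here a
 *	 struct hci_rp_read_local_version)
 *	kfree_skb(skb);
 */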

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
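
/* Illustrative usage sketch (hypothetical helper, not part of this file):
 * callers typically wrap the request building in a function matching the
 * callback signature above and hand it to hci_req_sync(), which takes the
 * request lock, runs the queued commands and waits for completion.
 *
 *	static int update_name_sync(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_name(req);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, update_name_sync, 0, HCI_CMD_TIMEOUT, NULL);
 */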

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
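
/* Illustrative usage sketch: building an asynchronous request out of the
 * primitives above. The queued commands are sent in order by the command
 * work, and the completion callback (if any) runs once the last command
 * completes. The payload variables and my_complete_cb are hypothetical.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_add(&req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete_cb);
 */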

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if the interleave scan was newly started by this function,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, devices
		 * to be scanned for, or ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the set extended scan parameters and set
	 * extended scan enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
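
/* Note on units (per the Bluetooth Core specification, not restated
 * elsewhere in this file): the interval and window arguments above are
 * expressed in units of 0.625 ms, so for example the default interval of
 * 0x0060 corresponds to 96 * 0.625 ms = 60 ms.
 */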

/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return false;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
		memset(&f, 0, sizeof(f));
		f.flt_type = HCI_FLT_CLEAR_ALL;
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
	}
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan && !scanning) {
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	} else if (!scan && scanning) {
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
					      bool enable)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_req_add_set_filter_enable(req, enable);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && !enable)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;

		/* Disable page scan if enabled */
		if (test_bit(HCI_PSCAN, &hdev->flags)) {
			page_scan = SCAN_DISABLED;
			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
				    &page_scan);
			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		}

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Disable advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, false);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		__hci_update_background_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		/* Clear any event filters and restore scan state */
		hci_req_clear_event_filter(&req);
		__hci_req_update_scan(&req);

		/* Reset passive/background scanning to normal */
		__hci_update_background_scan(&req);
		/* Enable all of the advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, true);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001491static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1492{
1493 /* If there is no connection we are OK to advertise. */
1494 if (hci_conn_num(hdev, LE_LINK) == 0)
1495 return true;
1496
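	/* hdev->le_states holds the controller's LE Supported States bitmask;
	 * the byte/bit checks below test whether the requested advertising
	 * type may be combined with the existing connection role.
	 */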
1497 /* Check le_states if there is any connection in slave role. */
1498 if (hdev->conn_hash.le_num_slave > 0) {
1499 /* Slave connection state and non connectable mode bit 20. */
1500 if (!connectable && !(hdev->le_states[2] & 0x10))
1501 return false;
1502
1503 /* Slave connection state and connectable mode bit 38
1504 * and scannable bit 21.
1505 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001506 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1507 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001508 return false;
1509 }
1510
1511 /* Check le_states if there is any connection in master role. */
1512 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1513 /* Master connection state and non connectable mode bit 18. */
1514 if (!connectable && !(hdev->le_states[2] & 0x02))
1515 return false;
1516
1517 /* Master connection state and connectable mode bit 35 and
1518 * scannable 19.
1519	 * scannable bit 19.
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001520 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001521 !(hdev->le_states[2] & 0x08)))
1522 return false;
1523 }
1524
1525 return true;
1526}
1527
Johan Hedbergf2252572015-11-18 12:49:20 +02001528void __hci_req_enable_advertising(struct hci_request *req)
1529{
1530 struct hci_dev *hdev = req->hdev;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001531 struct adv_info *adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001532 struct hci_cp_le_set_adv_param cp;
1533 u8 own_addr_type, enable = 0x01;
1534 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301535 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001536 u32 flags;
1537
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001538 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001539 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001540
1541 /* If the "connectable" instance flag was not set, then choose between
1542 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1543 */
1544 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1545 mgmt_get_connectable(hdev);
1546
1547 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001548 return;
1549
1550 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1551 __hci_req_disable_advertising(req);
1552
1553 /* Clear the HCI_LE_ADV bit temporarily so that the
1554 * hci_update_random_address knows that it's safe to go ahead
1555 * and write a new random address. The flag will be set back on
1556 * as soon as the SET_ADV_ENABLE HCI command completes.
1557 */
1558 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1559
Johan Hedbergf2252572015-11-18 12:49:20 +02001560 /* Set require_privacy to true only when non-connectable
1561 * advertising is used. In that case it is fine to use a
1562 * non-resolvable private address.
1563 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001564 if (hci_update_random_address(req, !connectable,
1565 adv_use_rpa(hdev, flags),
1566 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001567 return;
1568
1569 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001570
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001571 if (adv_instance) {
1572 adv_min_interval = adv_instance->min_interval;
1573 adv_max_interval = adv_instance->max_interval;
1574 } else {
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301575 adv_min_interval = hdev->le_adv_min_interval;
1576 adv_max_interval = hdev->le_adv_max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001577 }
1578
1579 if (connectable) {
1580 cp.type = LE_ADV_IND;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301581 } else {
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001582 if (adv_cur_instance_is_scannable(hdev))
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301583 cp.type = LE_ADV_SCAN_IND;
1584 else
1585 cp.type = LE_ADV_NONCONN_IND;
1586
1587 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1588 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1589 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1590 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301591 }
1592 }
1593
1594 cp.min_interval = cpu_to_le16(adv_min_interval);
1595 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001596 cp.own_address_type = own_addr_type;
1597 cp.channel_map = hdev->le_adv_channel_map;
1598
1599 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1600
1601 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1602}
1603
Michał Narajowskif61851f2016-10-19 10:20:27 +02001604u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001605{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001606 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001607 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001608
Michał Narajowskif61851f2016-10-19 10:20:27 +02001609 /* no space left for name (+ NULL + type + len) */
1610 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1611 return ad_len;
1612
1613 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001614 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001615 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001616 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001617 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001618
Michał Narajowskif61851f2016-10-19 10:20:27 +02001619 /* use short name if present */
1620 short_len = strlen(hdev->short_name);
1621 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001622 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001623 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001624
Michał Narajowskif61851f2016-10-19 10:20:27 +02001625 /* use shortened full name if present, we already know that name
1626	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1627 */
1628 if (complete_len) {
1629 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1630
1631 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1632 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1633
1634 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1635 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001636 }
1637
1638 return ad_len;
1639}
1640
Michał Narajowski1b422062016-10-05 12:28:27 +02001641static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1642{
1643 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1644}
1645
Michał Narajowski7c295c42016-09-18 12:50:02 +02001646static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1647{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001648 u8 scan_rsp_len = 0;
1649
Meng Yu149b3f12021-04-01 14:50:39 +08001650 if (hdev->appearance)
Michał Narajowski1b422062016-10-05 12:28:27 +02001651 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001652
Michał Narajowski1b422062016-10-05 12:28:27 +02001653 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001654}
1655
Johan Hedbergf2252572015-11-18 12:49:20 +02001656static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1657 u8 *ptr)
1658{
1659 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001660 u32 instance_flags;
1661 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001662
1663 adv_instance = hci_find_adv_instance(hdev, instance);
1664 if (!adv_instance)
1665 return 0;
1666
Michał Narajowski7c295c42016-09-18 12:50:02 +02001667 instance_flags = adv_instance->flags;
1668
Meng Yu149b3f12021-04-01 14:50:39 +08001669 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
Michał Narajowski1b422062016-10-05 12:28:27 +02001670 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001671
Michał Narajowski1b422062016-10-05 12:28:27 +02001672 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001673 adv_instance->scan_rsp_len);
1674
Michał Narajowski7c295c42016-09-18 12:50:02 +02001675 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001676
1677 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1678 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1679
1680 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001681}
1682
Johan Hedbergcab054a2015-11-30 11:21:45 +02001683void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001684{
1685 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001686 u8 len;
1687
1688 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1689 return;
1690
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301691 if (ext_adv_capable(hdev)) {
1692 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001693
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301694 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001695
Luiz Augusto von Dentza76a0d32020-11-13 16:44:33 -08001696 if (instance)
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301697 len = create_instance_scan_rsp_data(hdev, instance,
1698 cp.data);
1699 else
Luiz Augusto von Dentza76a0d32020-11-13 16:44:33 -08001700 len = create_default_scan_rsp_data(hdev, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001701
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301702 if (hdev->scan_rsp_data_len == len &&
1703 !memcmp(cp.data, hdev->scan_rsp_data, len))
1704 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001705
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301706 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1707 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001708
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001709 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301710 cp.length = len;
1711 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1712 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1713
1714 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1715 &cp);
1716 } else {
1717 struct hci_cp_le_set_scan_rsp_data cp;
1718
1719 memset(&cp, 0, sizeof(cp));
1720
1721 if (instance)
1722 len = create_instance_scan_rsp_data(hdev, instance,
1723 cp.data);
1724 else
1725 len = create_default_scan_rsp_data(hdev, cp.data);
1726
1727 if (hdev->scan_rsp_data_len == len &&
1728 !memcmp(cp.data, hdev->scan_rsp_data, len))
1729 return;
1730
1731 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1732 hdev->scan_rsp_data_len = len;
1733
1734 cp.length = len;
1735
1736 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1737 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001738}
1739
Johan Hedbergf2252572015-11-18 12:49:20 +02001740static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1741{
1742 struct adv_info *adv_instance = NULL;
1743 u8 ad_len = 0, flags = 0;
1744 u32 instance_flags;
1745
1746 /* Return 0 when the current instance identifier is invalid. */
1747 if (instance) {
1748 adv_instance = hci_find_adv_instance(hdev, instance);
1749 if (!adv_instance)
1750 return 0;
1751 }
1752
1753 instance_flags = get_adv_instance_flags(hdev, instance);
1754
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001755 /* If instance already has the flags set skip adding it once
1756 * again.
1757 */
1758 if (adv_instance && eir_get_data(adv_instance->adv_data,
1759 adv_instance->adv_data_len, EIR_FLAGS,
1760 NULL))
1761 goto skip_flags;
1762
Johan Hedbergf2252572015-11-18 12:49:20 +02001763 /* The Add Advertising command allows userspace to set both the general
1764 * and limited discoverable flags.
1765 */
1766 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1767 flags |= LE_AD_GENERAL;
1768
1769 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1770 flags |= LE_AD_LIMITED;
1771
Johan Hedbergf18ba582016-04-06 13:09:05 +03001772 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1773 flags |= LE_AD_NO_BREDR;
1774
Johan Hedbergf2252572015-11-18 12:49:20 +02001775 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1776 /* If a discovery flag wasn't provided, simply use the global
1777 * settings.
1778 */
1779 if (!flags)
1780 flags |= mgmt_get_adv_discov_flags(hdev);
1781
Johan Hedbergf2252572015-11-18 12:49:20 +02001782 /* If flags would still be empty, then there is no need to
1783	 * include the "Flags" AD field.
1784 */
1785 if (flags) {
1786 ptr[0] = 0x02;
1787 ptr[1] = EIR_FLAGS;
1788 ptr[2] = flags;
1789
1790 ad_len += 3;
1791 ptr += 3;
1792 }
1793 }
1794
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001795skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001796 if (adv_instance) {
1797 memcpy(ptr, adv_instance->adv_data,
1798 adv_instance->adv_data_len);
1799 ad_len += adv_instance->adv_data_len;
1800 ptr += adv_instance->adv_data_len;
1801 }
1802
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301803 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1804 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001805
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301806 if (ext_adv_capable(hdev)) {
1807 if (adv_instance)
1808 adv_tx_power = adv_instance->tx_power;
1809 else
1810 adv_tx_power = hdev->adv_tx_power;
1811 } else {
1812 adv_tx_power = hdev->adv_tx_power;
1813 }
1814
1815 /* Provide Tx Power only if we can provide a valid value for it */
1816 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1817 ptr[0] = 0x02;
1818 ptr[1] = EIR_TX_POWER;
1819 ptr[2] = (u8)adv_tx_power;
1820
1821 ad_len += 3;
1822 ptr += 3;
1823 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001824 }
1825
1826 return ad_len;
1827}
1828
Johan Hedbergcab054a2015-11-30 11:21:45 +02001829void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001830{
1831 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001832 u8 len;
1833
1834 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1835 return;
1836
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301837 if (ext_adv_capable(hdev)) {
1838 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001839
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301840 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001841
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301842 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001843
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301844 /* There's nothing to do if the data hasn't changed */
1845 if (hdev->adv_data_len == len &&
1846 memcmp(cp.data, hdev->adv_data, len) == 0)
1847 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001848
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301849 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1850 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001851
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301852 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001853 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301854 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1855 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1856
1857 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1858 } else {
1859 struct hci_cp_le_set_adv_data cp;
1860
1861 memset(&cp, 0, sizeof(cp));
1862
1863 len = create_instance_adv_data(hdev, instance, cp.data);
1864
1865 /* There's nothing to do if the data hasn't changed */
1866 if (hdev->adv_data_len == len &&
1867 memcmp(cp.data, hdev->adv_data, len) == 0)
1868 return;
1869
1870 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1871 hdev->adv_data_len = len;
1872
1873 cp.length = len;
1874
1875 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1876 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001877}
1878
Johan Hedbergcab054a2015-11-30 11:21:45 +02001879int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001880{
1881 struct hci_request req;
1882
1883 hci_req_init(&req, hdev);
1884 __hci_req_update_adv_data(&req, instance);
1885
1886 return hci_req_run(&req, NULL);
1887}
1888
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301889static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1890 u16 opcode)
1891{
1892 BT_DBG("%s status %u", hdev->name, status);
1893}
1894
1895void hci_req_disable_address_resolution(struct hci_dev *hdev)
1896{
1897 struct hci_request req;
1898 __u8 enable = 0x00;
1899
1900 if (!use_ll_privacy(hdev) &&
1901 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1902 return;
1903
1904 hci_req_init(&req, hdev);
1905
1906 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1907
1908 hci_req_run(&req, enable_addr_resolution_complete);
1909}
1910
Johan Hedbergf2252572015-11-18 12:49:20 +02001911static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1912{
Howard Chung22fbcfc2020-11-11 15:02:19 +08001913 bt_dev_dbg(hdev, "status %u", status);
Johan Hedbergf2252572015-11-18 12:49:20 +02001914}
1915
1916void hci_req_reenable_advertising(struct hci_dev *hdev)
1917{
1918 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001919
1920 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001921 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001922 return;
1923
Johan Hedbergf2252572015-11-18 12:49:20 +02001924 hci_req_init(&req, hdev);
1925
Johan Hedbergcab054a2015-11-30 11:21:45 +02001926 if (hdev->cur_adv_instance) {
1927 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1928 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001929 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301930 if (ext_adv_capable(hdev)) {
1931 __hci_req_start_ext_adv(&req, 0x00);
1932 } else {
1933 __hci_req_update_adv_data(&req, 0x00);
1934 __hci_req_update_scan_rsp_data(&req, 0x00);
1935 __hci_req_enable_advertising(&req);
1936 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001937 }
1938
1939 hci_req_run(&req, adv_enable_complete);
1940}
1941
1942static void adv_timeout_expire(struct work_struct *work)
1943{
1944 struct hci_dev *hdev = container_of(work, struct hci_dev,
1945 adv_instance_expire.work);
1946
1947 struct hci_request req;
1948 u8 instance;
1949
Howard Chung22fbcfc2020-11-11 15:02:19 +08001950 bt_dev_dbg(hdev, "");
Johan Hedbergf2252572015-11-18 12:49:20 +02001951
1952 hci_dev_lock(hdev);
1953
1954 hdev->adv_instance_timeout = 0;
1955
Johan Hedbergcab054a2015-11-30 11:21:45 +02001956 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001957 if (instance == 0x00)
1958 goto unlock;
1959
1960 hci_req_init(&req, hdev);
1961
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001962 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001963
1964 if (list_empty(&hdev->adv_instances))
1965 __hci_req_disable_advertising(&req);
1966
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001967 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001968
1969unlock:
1970 hci_dev_unlock(hdev);
1971}
1972
Howard Chungc4f1f402020-11-26 12:22:21 +08001973static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1974 unsigned long opt)
1975{
1976 struct hci_dev *hdev = req->hdev;
1977 int ret = 0;
1978
1979 hci_dev_lock(hdev);
1980
1981 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1982 hci_req_add_le_scan_disable(req, false);
1983 hci_req_add_le_passive_scan(req);
1984
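	/* Alternate between allowlist-only and unfiltered passive scanning;
	 * interleave_scan_work() re-queues itself with the duration that
	 * matches each phase.
	 */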
1985 switch (hdev->interleave_scan_state) {
1986 case INTERLEAVE_SCAN_ALLOWLIST:
1987 bt_dev_dbg(hdev, "next state: allowlist");
1988 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1989 break;
1990 case INTERLEAVE_SCAN_NO_FILTER:
1991 bt_dev_dbg(hdev, "next state: no filter");
1992 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1993 break;
1994 case INTERLEAVE_SCAN_NONE:
1995 BT_ERR("unexpected error");
1996 ret = -1;
1997 }
1998
1999 hci_dev_unlock(hdev);
2000
2001 return ret;
2002}
2003
2004static void interleave_scan_work(struct work_struct *work)
2005{
2006 struct hci_dev *hdev = container_of(work, struct hci_dev,
2007 interleave_scan.work);
2008 u8 status;
2009 unsigned long timeout;
2010
2011 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2012 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2013 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2014 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2015 } else {
2016 bt_dev_err(hdev, "unexpected error");
2017 return;
2018 }
2019
2020 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2021 HCI_CMD_TIMEOUT, &status);
2022
2023 /* Don't continue interleaving if it was canceled */
2024 if (is_interleave_scanning(hdev))
2025 queue_delayed_work(hdev->req_workqueue,
2026 &hdev->interleave_scan, timeout);
2027}
2028
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302029int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2030 bool use_rpa, struct adv_info *adv_instance,
2031 u8 *own_addr_type, bdaddr_t *rand_addr)
2032{
2033 int err;
2034
2035 bacpy(rand_addr, BDADDR_ANY);
2036
2037 /* If privacy is enabled use a resolvable private address. If
2038 * current RPA has expired then generate a new one.
2039 */
2040 if (use_rpa) {
2041 int to;
2042
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05302043		/* If the controller supports LL Privacy, use own address type
2044		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2045 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05302046 if (use_ll_privacy(hdev) &&
2047 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05302048 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2049 else
2050 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302051
2052 if (adv_instance) {
2053 if (!adv_instance->rpa_expired &&
2054 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2055 return 0;
2056
2057 adv_instance->rpa_expired = false;
2058 } else {
2059 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2060 !bacmp(&hdev->random_addr, &hdev->rpa))
2061 return 0;
2062 }
2063
2064 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2065 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01002066 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302067 return err;
2068 }
2069
2070 bacpy(rand_addr, &hdev->rpa);
2071
2072 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2073 if (adv_instance)
2074 queue_delayed_work(hdev->workqueue,
2075 &adv_instance->rpa_expired_cb, to);
2076 else
2077 queue_delayed_work(hdev->workqueue,
2078 &hdev->rpa_expired, to);
2079
2080 return 0;
2081 }
2082
2083 /* In case of required privacy without resolvable private address,
2084	 * use a non-resolvable private address. This is useful for
2085 * non-connectable advertising.
2086 */
2087 if (require_privacy) {
2088 bdaddr_t nrpa;
2089
2090 while (true) {
2091 /* The non-resolvable private address is generated
2092			 * from six random bytes with the two most significant
2093 * bits cleared.
2094 */
2095 get_random_bytes(&nrpa, 6);
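			/* bdaddr_t is stored little-endian, so b[5] is the
			 * most significant byte.
			 */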
2096 nrpa.b[5] &= 0x3f;
2097
2098 /* The non-resolvable private address shall not be
2099 * equal to the public address.
2100 */
2101 if (bacmp(&hdev->bdaddr, &nrpa))
2102 break;
2103 }
2104
2105 *own_addr_type = ADDR_LE_DEV_RANDOM;
2106 bacpy(rand_addr, &nrpa);
2107
2108 return 0;
2109 }
2110
2111 /* No privacy so use a public address. */
2112 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2113
2114 return 0;
2115}
2116
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302117void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2118{
2119 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2120}
2121
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302122int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302123{
2124 struct hci_cp_le_set_ext_adv_params cp;
2125 struct hci_dev *hdev = req->hdev;
2126 bool connectable;
2127 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302128 bdaddr_t random_addr;
2129 u8 own_addr_type;
2130 int err;
2131 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302132 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302133
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302134 if (instance > 0) {
2135 adv_instance = hci_find_adv_instance(hdev, instance);
2136 if (!adv_instance)
2137 return -EINVAL;
2138 } else {
2139 adv_instance = NULL;
2140 }
2141
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302142 flags = get_adv_instance_flags(hdev, instance);
2143
2144 /* If the "connectable" instance flag was not set, then choose between
2145 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2146 */
2147 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2148 mgmt_get_connectable(hdev);
2149
Colin Ian King75edd1f2018-11-09 13:27:36 +00002150 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302151 return -EPERM;
2152
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302153 /* Set require_privacy to true only when non-connectable
2154 * advertising is used. In that case it is fine to use a
2155 * non-resolvable private address.
2156 */
2157 err = hci_get_random_address(hdev, !connectable,
2158 adv_use_rpa(hdev, flags), adv_instance,
2159 &own_addr_type, &random_addr);
2160 if (err < 0)
2161 return err;
2162
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302163 memset(&cp, 0, sizeof(cp));
2164
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08002165 if (adv_instance) {
2166 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2167 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2168 cp.tx_power = adv_instance->tx_power;
2169 } else {
2170 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2171 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2172 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2173 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302174
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302175 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2176
2177 if (connectable) {
2178 if (secondary_adv)
2179 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2180 else
2181 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Daniel Winklerff02db12021-03-03 11:15:23 -08002182 } else if (adv_instance_is_scannable(hdev, instance) ||
2183 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302184 if (secondary_adv)
2185 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2186 else
2187 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2188 } else {
2189 if (secondary_adv)
2190 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2191 else
2192 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2193 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302194
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302195 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302196 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002197 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302198
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302199 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2200 cp.primary_phy = HCI_ADV_PHY_1M;
2201 cp.secondary_phy = HCI_ADV_PHY_2M;
2202 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2203 cp.primary_phy = HCI_ADV_PHY_CODED;
2204 cp.secondary_phy = HCI_ADV_PHY_CODED;
2205 } else {
2206 /* In all other cases use 1M */
2207 cp.primary_phy = HCI_ADV_PHY_1M;
2208 cp.secondary_phy = HCI_ADV_PHY_1M;
2209 }
2210
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302211 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2212
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302213 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2214 bacmp(&random_addr, BDADDR_ANY)) {
2215 struct hci_cp_le_set_adv_set_rand_addr cp;
2216
2217 /* Check if random address need to be updated */
2218 if (adv_instance) {
2219 if (!bacmp(&random_addr, &adv_instance->random_addr))
2220 return 0;
2221 } else {
2222 if (!bacmp(&random_addr, &hdev->random_addr))
2223 return 0;
2224 }
2225
2226 memset(&cp, 0, sizeof(cp));
2227
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07002228 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302229 bacpy(&cp.bdaddr, &random_addr);
2230
2231 hci_req_add(req,
2232 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2233 sizeof(cp), &cp);
2234 }
2235
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302236 return 0;
2237}
2238
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002239int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302240{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002241 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302242 struct hci_cp_le_set_ext_adv_enable *cp;
2243 struct hci_cp_ext_adv_set *adv_set;
2244 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
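	/* data holds the enable command followed by a single adv set entry */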
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002245 struct adv_info *adv_instance;
2246
2247 if (instance > 0) {
2248 adv_instance = hci_find_adv_instance(hdev, instance);
2249 if (!adv_instance)
2250 return -EINVAL;
2251 } else {
2252 adv_instance = NULL;
2253 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302254
2255 cp = (void *) data;
2256 adv_set = (void *) cp->data;
2257
2258 memset(cp, 0, sizeof(*cp));
2259
2260 cp->enable = 0x01;
2261 cp->num_of_sets = 0x01;
2262
2263 memset(adv_set, 0, sizeof(*adv_set));
2264
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002265 adv_set->handle = instance;
2266
2267 /* Set duration per instance since controller is responsible for
2268 * scheduling it.
2269 */
2270 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03002271 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002272
2273 /* Time = N * 10 ms */
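		/* e.g. a 2 s timeout gives duration = 2000 ms -> 200 units */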
2274 adv_set->duration = cpu_to_le16(duration / 10);
2275 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302276
2277 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2278 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2279 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002280
2281 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302282}
2283
Daniel Winkler37adf702020-07-14 14:16:00 -07002284int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2285{
2286 struct hci_dev *hdev = req->hdev;
2287 struct hci_cp_le_set_ext_adv_enable *cp;
2288 struct hci_cp_ext_adv_set *adv_set;
2289 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2290 u8 req_size;
2291
2292 /* If request specifies an instance that doesn't exist, fail */
2293 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2294 return -EINVAL;
2295
2296 memset(data, 0, sizeof(data));
2297
2298 cp = (void *)data;
2299 adv_set = (void *)cp->data;
2300
2301 /* Instance 0x00 indicates all advertising instances will be disabled */
2302 cp->num_of_sets = !!instance;
2303 cp->enable = 0x00;
2304
2305 adv_set->handle = instance;
2306
2307 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2308 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2309
2310 return 0;
2311}
2312
2313int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2314{
2315 struct hci_dev *hdev = req->hdev;
2316
2317 /* If request specifies an instance that doesn't exist, fail */
2318 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2319 return -EINVAL;
2320
2321 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2322
2323 return 0;
2324}
2325
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302326int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2327{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302328 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07002329 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302330 int err;
2331
Daniel Winkler37adf702020-07-14 14:16:00 -07002332 /* If instance isn't pending, the chip knows about it, and it's safe to
2333 * disable
2334 */
2335 if (adv_instance && !adv_instance->pending)
2336 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302337
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302338 err = __hci_req_setup_ext_adv_instance(req, instance);
2339 if (err < 0)
2340 return err;
2341
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302342 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002343 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302344
2345 return 0;
2346}
2347
Johan Hedbergf2252572015-11-18 12:49:20 +02002348int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2349 bool force)
2350{
2351 struct hci_dev *hdev = req->hdev;
2352 struct adv_info *adv_instance = NULL;
2353 u16 timeout;
2354
2355 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002356 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002357 return -EPERM;
2358
2359 if (hdev->adv_instance_timeout)
2360 return -EBUSY;
2361
2362 adv_instance = hci_find_adv_instance(hdev, instance);
2363 if (!adv_instance)
2364 return -ENOENT;
2365
2366 /* A zero timeout means unlimited advertising. As long as there is
2367 * only one instance, duration should be ignored. We still set a timeout
2368 * in case further instances are being added later on.
2369 *
2370 * If the remaining lifetime of the instance is more than the duration
2371 * then the timeout corresponds to the duration, otherwise it will be
2372 * reduced to the remaining instance lifetime.
2373 */
2374 if (adv_instance->timeout == 0 ||
2375 adv_instance->duration <= adv_instance->remaining_time)
2376 timeout = adv_instance->duration;
2377 else
2378 timeout = adv_instance->remaining_time;
2379
2380 /* The remaining time is being reduced unless the instance is being
2381 * advertised without time limit.
2382 */
2383 if (adv_instance->timeout)
2384 adv_instance->remaining_time =
2385 adv_instance->remaining_time - timeout;
2386
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002387 /* Only use work for scheduling instances with legacy advertising */
2388 if (!ext_adv_capable(hdev)) {
2389 hdev->adv_instance_timeout = timeout;
2390 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002391 &hdev->adv_instance_expire,
2392 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002393 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002394
2395 /* If we're just re-scheduling the same instance again then do not
2396 * execute any HCI commands. This happens when a single instance is
2397 * being advertised.
2398 */
2399 if (!force && hdev->cur_adv_instance == instance &&
2400 hci_dev_test_flag(hdev, HCI_LE_ADV))
2401 return 0;
2402
2403 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302404 if (ext_adv_capable(hdev)) {
2405 __hci_req_start_ext_adv(req, instance);
2406 } else {
2407 __hci_req_update_adv_data(req, instance);
2408 __hci_req_update_scan_rsp_data(req, instance);
2409 __hci_req_enable_advertising(req);
2410 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002411
2412 return 0;
2413}
2414
Johan Hedbergf2252572015-11-18 12:49:20 +02002415/* For a single instance:
2416 * - force == true: The instance will be removed even when its remaining
2417 * lifetime is not zero.
2418 * - force == false: the instance will be deactivated but kept stored unless
2419 * the remaining lifetime is zero.
2420 *
2421 * For instance == 0x00:
2422 * - force == true: All instances will be removed regardless of their timeout
2423 * setting.
2424 * - force == false: Only instances that have a timeout will be removed.
2425 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002426void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2427 struct hci_request *req, u8 instance,
2428 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002429{
2430 struct adv_info *adv_instance, *n, *next_instance = NULL;
2431 int err;
2432 u8 rem_inst;
2433
2434 /* Cancel any timeout concerning the removed instance(s). */
2435 if (!instance || hdev->cur_adv_instance == instance)
2436 cancel_adv_timeout(hdev);
2437
2438 /* Get the next instance to advertise BEFORE we remove
2439 * the current one. This can be the same instance again
2440 * if there is only one instance.
2441 */
2442 if (instance && hdev->cur_adv_instance == instance)
2443 next_instance = hci_get_next_instance(hdev, instance);
2444
2445 if (instance == 0x00) {
2446 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2447 list) {
2448 if (!(force || adv_instance->timeout))
2449 continue;
2450
2451 rem_inst = adv_instance->instance;
2452 err = hci_remove_adv_instance(hdev, rem_inst);
2453 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002454 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002455 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002456 } else {
2457 adv_instance = hci_find_adv_instance(hdev, instance);
2458
2459 if (force || (adv_instance && adv_instance->timeout &&
2460 !adv_instance->remaining_time)) {
2461 /* Don't advertise a removed instance. */
2462 if (next_instance &&
2463 next_instance->instance == instance)
2464 next_instance = NULL;
2465
2466 err = hci_remove_adv_instance(hdev, instance);
2467 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002468 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002469 }
2470 }
2471
Johan Hedbergf2252572015-11-18 12:49:20 +02002472 if (!req || !hdev_is_powered(hdev) ||
2473 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2474 return;
2475
Daniel Winkler37adf702020-07-14 14:16:00 -07002476 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002477 __hci_req_schedule_adv_instance(req, next_instance->instance,
2478 false);
2479}
2480
Johan Hedberg0857dd32014-12-19 13:40:20 +02002481static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2482{
2483 struct hci_dev *hdev = req->hdev;
2484
2485 /* If we're advertising or initiating an LE connection we can't
2486 * go ahead and change the random address at this time. This is
2487 * because the eventual initiator address used for the
2488 * subsequently created connection will be undefined (some
2489 * controllers use the new address and others the one we had
2490 * when the operation started).
2491 *
2492 * In this kind of scenario skip the update and let the random
2493 * address be updated at the next cycle.
2494 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002495 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002496 hci_lookup_le_connect(hdev)) {
Howard Chung22fbcfc2020-11-11 15:02:19 +08002497 bt_dev_dbg(hdev, "Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002498 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002499 return;
2500 }
2501
2502 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2503}
2504
2505int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002506 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002507{
2508 struct hci_dev *hdev = req->hdev;
2509 int err;
2510
2511 /* If privacy is enabled use a resolvable private address. If
2512 * current RPA has expired or there is something else than
2513 * the current RPA in use, then generate a new one.
2514 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002515 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002516 int to;
2517
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302518		/* If the controller supports LL Privacy, use own address type
2519		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2520 */
Sathish Narasimmanabb638b2021-04-05 20:00:23 +05302521 if (use_ll_privacy(hdev) &&
2522 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302523 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2524 else
2525 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002526
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002527 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002528 !bacmp(&hdev->random_addr, &hdev->rpa))
2529 return 0;
2530
2531 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2532 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002533 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002534 return err;
2535 }
2536
2537 set_random_addr(req, &hdev->rpa);
2538
2539 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2540 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2541
2542 return 0;
2543 }
2544
2545 /* In case of required privacy without resolvable private address,
2546	 * use a non-resolvable private address. This is useful for active
2547 * scanning and non-connectable advertising.
2548 */
2549 if (require_privacy) {
2550 bdaddr_t nrpa;
2551
2552 while (true) {
2553 /* The non-resolvable private address is generated
2554			 * from six random bytes with the two most significant
2555 * bits cleared.
2556 */
2557 get_random_bytes(&nrpa, 6);
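			/* bdaddr_t is stored little-endian, so b[5] is the
			 * most significant byte.
			 */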
2558 nrpa.b[5] &= 0x3f;
2559
2560 /* The non-resolvable private address shall not be
2561 * equal to the public address.
2562 */
2563 if (bacmp(&hdev->bdaddr, &nrpa))
2564 break;
2565 }
2566
2567 *own_addr_type = ADDR_LE_DEV_RANDOM;
2568 set_random_addr(req, &nrpa);
2569 return 0;
2570 }
2571
2572 /* If forcing static address is in use or there is no public
2573	 * address, use the static address as the random address (but skip
2574	 * the HCI command if the current random address is already the
2575	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002576 *
2577 * In case BR/EDR has been disabled on a dual-mode controller
2578 * and a static address has been configured, then use that
2579 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002580 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002581 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002582 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002583 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002584 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002585 *own_addr_type = ADDR_LE_DEV_RANDOM;
2586 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2587 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2588 &hdev->static_addr);
2589 return 0;
2590 }
2591
2592 /* Neither privacy nor static address is being used so use a
2593 * public address.
2594 */
2595 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2596
2597 return 0;
2598}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002599
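/* Return true if any device on the BR/EDR whitelist is currently
 * disconnected; page scan must then stay enabled so those devices
 * can reconnect.
 */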
Johan Hedberg405a2612014-12-19 23:18:22 +02002600static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2601{
2602 struct bdaddr_list *b;
2603
2604 list_for_each_entry(b, &hdev->whitelist, list) {
2605 struct hci_conn *conn;
2606
2607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2608 if (!conn)
2609 return true;
2610
2611 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2612 return true;
2613 }
2614
2615 return false;
2616}
2617
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002618void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002619{
2620 struct hci_dev *hdev = req->hdev;
2621 u8 scan;
2622
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002623 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002624 return;
2625
2626 if (!hdev_is_powered(hdev))
2627 return;
2628
2629 if (mgmt_powering_down(hdev))
2630 return;
2631
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002632 if (hdev->scanning_paused)
2633 return;
2634
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002635 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002636 disconnected_whitelist_entries(hdev))
2637 scan = SCAN_PAGE;
2638 else
2639 scan = SCAN_DISABLED;
2640
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002641 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002642 scan |= SCAN_INQUIRY;
2643
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002644 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2645 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2646 return;
2647
Johan Hedberg405a2612014-12-19 23:18:22 +02002648 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2649}
2650
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002651static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002652{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002653 hci_dev_lock(req->hdev);
2654 __hci_req_update_scan(req);
2655 hci_dev_unlock(req->hdev);
2656 return 0;
2657}
Johan Hedberg405a2612014-12-19 23:18:22 +02002658
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002659static void scan_update_work(struct work_struct *work)
2660{
2661 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2662
2663 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002664}
2665
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002666static int connectable_update(struct hci_request *req, unsigned long opt)
2667{
2668 struct hci_dev *hdev = req->hdev;
2669
2670 hci_dev_lock(hdev);
2671
2672 __hci_req_update_scan(req);
2673
2674 /* If BR/EDR is not enabled and we disable advertising as a
2675 * by-product of disabling connectable, we need to update the
2676 * advertising flags.
2677 */
2678 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002679 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002680
2681 /* Update the advertising parameters if necessary */
2682 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302683 !list_empty(&hdev->adv_instances)) {
2684 if (ext_adv_capable(hdev))
2685 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2686 else
2687 __hci_req_enable_advertising(req);
2688 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002689
2690 __hci_update_background_scan(req);
2691
2692 hci_dev_unlock(hdev);
2693
2694 return 0;
2695}
2696
2697static void connectable_update_work(struct work_struct *work)
2698{
2699 struct hci_dev *hdev = container_of(work, struct hci_dev,
2700 connectable_update);
2701 u8 status;
2702
2703 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2704 mgmt_set_connectable_complete(hdev, status);
2705}
2706
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002707static u8 get_service_classes(struct hci_dev *hdev)
2708{
2709 struct bt_uuid *uuid;
2710 u8 val = 0;
2711
2712 list_for_each_entry(uuid, &hdev->uuids, list)
2713 val |= uuid->svc_hint;
2714
2715 return val;
2716}
2717
2718void __hci_req_update_class(struct hci_request *req)
2719{
2720 struct hci_dev *hdev = req->hdev;
2721 u8 cod[3];
2722
Howard Chung22fbcfc2020-11-11 15:02:19 +08002723 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002724
2725 if (!hdev_is_powered(hdev))
2726 return;
2727
2728 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2729 return;
2730
2731 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2732 return;
2733
2734 cod[0] = hdev->minor_class;
2735 cod[1] = hdev->major_class;
2736 cod[2] = get_service_classes(hdev);
2737
2738 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2739 cod[1] |= 0x20;
2740
2741 if (memcmp(cod, hdev->dev_class, 3) == 0)
2742 return;
2743
2744 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2745}
2746
Johan Hedbergaed1a882015-11-22 17:24:44 +03002747static void write_iac(struct hci_request *req)
2748{
2749 struct hci_dev *hdev = req->hdev;
2750 struct hci_cp_write_current_iac_lap cp;
2751
2752 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2753 return;
2754
2755 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2756 /* Limited discoverable mode */
2757 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2758 cp.iac_lap[0] = 0x00; /* LIAC */
2759 cp.iac_lap[1] = 0x8b;
2760 cp.iac_lap[2] = 0x9e;
2761 cp.iac_lap[3] = 0x33; /* GIAC */
2762 cp.iac_lap[4] = 0x8b;
2763 cp.iac_lap[5] = 0x9e;
2764 } else {
2765 /* General discoverable mode */
2766 cp.num_iac = 1;
2767 cp.iac_lap[0] = 0x33; /* GIAC */
2768 cp.iac_lap[1] = 0x8b;
2769 cp.iac_lap[2] = 0x9e;
2770 }
2771
2772 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2773 (cp.num_iac * 3) + 1, &cp);
2774}
2775
2776static int discoverable_update(struct hci_request *req, unsigned long opt)
2777{
2778 struct hci_dev *hdev = req->hdev;
2779
2780 hci_dev_lock(hdev);
2781
2782 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2783 write_iac(req);
2784 __hci_req_update_scan(req);
2785 __hci_req_update_class(req);
2786 }
2787
2788 /* Advertising instances don't use the global discoverable setting, so
2789 * only update AD if advertising was enabled using Set Advertising.
2790 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002791 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002792 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002793
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002794 /* Discoverable mode affects the local advertising
2795 * address in limited privacy mode.
2796 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302797 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2798 if (ext_adv_capable(hdev))
2799 __hci_req_start_ext_adv(req, 0x00);
2800 else
2801 __hci_req_enable_advertising(req);
2802 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002803 }
2804
Johan Hedbergaed1a882015-11-22 17:24:44 +03002805 hci_dev_unlock(hdev);
2806
2807 return 0;
2808}
2809
2810static void discoverable_update_work(struct work_struct *work)
2811{
2812 struct hci_dev *hdev = container_of(work, struct hci_dev,
2813 discoverable_update);
2814 u8 status;
2815
2816 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2817 mgmt_set_discoverable_complete(hdev, status);
2818}
2819
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002820void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2821 u8 reason)
2822{
2823 switch (conn->state) {
2824 case BT_CONNECTED:
2825 case BT_CONFIG:
2826 if (conn->type == AMP_LINK) {
2827 struct hci_cp_disconn_phy_link cp;
2828
2829 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2830 cp.reason = reason;
2831 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2832 &cp);
2833 } else {
2834 struct hci_cp_disconnect dc;
2835
2836 dc.handle = cpu_to_le16(conn->handle);
2837 dc.reason = reason;
2838 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2839 }
2840
2841 conn->state = BT_DISCONN;
2842
2843 break;
2844 case BT_CONNECT:
2845 if (conn->type == LE_LINK) {
2846 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2847 break;
2848 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2849 0, NULL);
2850 } else if (conn->type == ACL_LINK) {
2851 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2852 break;
2853 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2854 6, &conn->dst);
2855 }
2856 break;
2857 case BT_CONNECT2:
2858 if (conn->type == ACL_LINK) {
2859 struct hci_cp_reject_conn_req rej;
2860
2861 bacpy(&rej.bdaddr, &conn->dst);
2862 rej.reason = reason;
2863
2864 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2865 sizeof(rej), &rej);
2866 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2867 struct hci_cp_reject_sync_conn_req rej;
2868
2869 bacpy(&rej.bdaddr, &conn->dst);
2870
2871 /* SCO rejection has its own limited set of
2872 * allowed error values (0x0D-0x0F) which isn't
2873 * compatible with most values passed to this
2874 * function. To be safe hard-code one of the
2875 * values that's suitable for SCO.
2876 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002877 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002878
2879 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2880 sizeof(rej), &rej);
2881 }
2882 break;
2883 default:
2884 conn->state = BT_CLOSED;
2885 break;
2886 }
2887}
2888
2889static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2890{
2891 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002892 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002893}
2894
2895int hci_abort_conn(struct hci_conn *conn, u8 reason)
2896{
2897 struct hci_request req;
2898 int err;
2899
2900 hci_req_init(&req, conn->hdev);
2901
2902 __hci_abort_conn(&req, conn, reason);
2903
2904 err = hci_req_run(&req, abort_conn_complete);
2905 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002906 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002907 return err;
2908 }
2909
2910 return 0;
2911}
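
/* Illustrative sketch (not part of the original file): a caller that wants
 * to tear down a link with a specific disconnect reason builds on
 * hci_abort_conn(). HCI_ERROR_REMOTE_USER_TERM (0x13) is one of the
 * standard HCI disconnect reasons; __hci_abort_conn() above handles any
 * valid reason the same way, except for SCO/eSCO rejection where the
 * reason is hard-coded as noted in the comment there.
 *
 *	int err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *	if (err)
 *		bt_dev_err(conn->hdev, "abort failed: %d", err);
 */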
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002912
Johan Hedberga1d01db2015-11-11 08:11:25 +02002913static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002914{
2915 hci_dev_lock(req->hdev);
2916 __hci_update_background_scan(req);
2917 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002918 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002919}
2920
2921static void bg_scan_update(struct work_struct *work)
2922{
2923 struct hci_dev *hdev = container_of(work, struct hci_dev,
2924 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002925 struct hci_conn *conn;
2926 u8 status;
2927 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002928
Johan Hedberg84235d22015-11-11 08:11:20 +02002929 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2930 if (!err)
2931 return;
2932
2933 hci_dev_lock(hdev);
2934
2935 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2936 if (conn)
2937 hci_le_conn_failed(conn, status);
2938
2939 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002940}
2941
Johan Hedberga1d01db2015-11-11 08:11:25 +02002942static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002943{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302944 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002945 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002946}
2947
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002948static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2949{
2950 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002951 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2952 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002953 struct hci_cp_inquiry cp;
2954
Archie Pusaka06752d12021-04-01 11:11:33 +08002955 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2956 return 0;
2957
Howard Chung22fbcfc2020-11-11 15:02:19 +08002958 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002959
2960 hci_dev_lock(req->hdev);
2961 hci_inquiry_cache_flush(req->hdev);
2962 hci_dev_unlock(req->hdev);
2963
2964 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002965
2966 if (req->hdev->discovery.limited)
2967 memcpy(&cp.lap, liac, sizeof(cp.lap));
2968 else
2969 memcpy(&cp.lap, giac, sizeof(cp.lap));
2970
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002971 cp.length = length;
2972
2973 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2974
2975 return 0;
2976}
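
/* Note added for clarity (not in the original file): the three-byte LAPs
 * above are the standard inquiry access codes in little-endian order.
 * giac[] = { 0x33, 0x8b, 0x9e } encodes the General Inquiry Access Code
 * 0x9E8B33 and liac[] = { 0x00, 0x8b, 0x9e } the Limited Inquiry Access
 * Code 0x9E8B00, so a limited inquiry only reaches devices that are
 * currently in limited discoverable mode.
 */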
2977
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002978static void le_scan_disable_work(struct work_struct *work)
2979{
2980 struct hci_dev *hdev = container_of(work, struct hci_dev,
2981 le_scan_disable.work);
2982 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002983
Howard Chung22fbcfc2020-11-11 15:02:19 +08002984 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002985
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002986 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002987 return;
2988
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002989 cancel_delayed_work(&hdev->le_scan_restart);
2990
2991 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2992 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002993 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2994 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002995 return;
2996 }
2997
2998 hdev->discovery.scan_start = 0;
2999
 3000	/* If we were running an LE-only scan, change the discovery state.
 3001	 * If we were running both LE and BR/EDR inquiry simultaneously,
 3002	 * and BR/EDR inquiry has already finished, stop discovery;
 3003	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
 3004	 * If we are about to resolve a remote device name, do not change
 3005	 * the discovery state.
 3006	 */
3007
3008 if (hdev->discovery.type == DISCOV_TYPE_LE)
3009 goto discov_stopped;
3010
3011 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3012 return;
3013
3014 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3015 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3016 hdev->discovery.state != DISCOVERY_RESOLVING)
3017 goto discov_stopped;
3018
3019 return;
3020 }
3021
3022 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3023 HCI_CMD_TIMEOUT, &status);
3024 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01003025 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02003026 goto discov_stopped;
3027 }
3028
3029 return;
3030
3031discov_stopped:
3032 hci_dev_lock(hdev);
3033 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3034 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003035}
3036
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003037static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003038{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003039 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003040
3041 /* If controller is not scanning we are done. */
3042 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3043 return 0;
3044
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07003045 if (hdev->scanning_paused) {
3046 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3047 return 0;
3048 }
3049
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303050 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003051
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05303052 if (use_ext_scan(hdev)) {
3053 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3054
3055 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3056 ext_enable_cp.enable = LE_SCAN_ENABLE;
3057 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3058
3059 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3060 sizeof(ext_enable_cp), &ext_enable_cp);
3061 } else {
3062 struct hci_cp_le_set_scan_enable cp;
3063
3064 memset(&cp, 0, sizeof(cp));
3065 cp.enable = LE_SCAN_ENABLE;
3066 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3067 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3068 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003069
3070 return 0;
3071}
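
/* Note added for clarity: le_scan_restart() only re-issues the scan enable
 * command (extended or legacy, depending on the controller) and leaves the
 * scan parameters untouched. Together with le_scan_restart_work() below,
 * which re-arms the le_scan_disable timer, this lets controllers with
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER keep delivering advertising reports
 * during a long discovery, since restarting the scan resets the
 * controller's duplicate filtering.
 */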
3072
3073static void le_scan_restart_work(struct work_struct *work)
3074{
3075 struct hci_dev *hdev = container_of(work, struct hci_dev,
3076 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003077 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003078 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003079
Howard Chung22fbcfc2020-11-11 15:02:19 +08003080 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003081
Johan Hedberg3dfe5902015-11-11 12:24:23 +02003082 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003083 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01003084 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3085 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003086 return;
3087 }
3088
3089 hci_dev_lock(hdev);
3090
3091 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3092 !hdev->discovery.scan_start)
3093 goto unlock;
3094
 3095	/* When the scan was started, hdev->le_scan_disable was queued to
 3096	 * run 'duration' after scan_start. During the scan restart this
 3097	 * job was canceled, so queue it again with the remaining timeout
 3098	 * to make sure that the scan does not run indefinitely.
 3099	 */
3100 duration = hdev->discovery.scan_duration;
3101 scan_start = hdev->discovery.scan_start;
3102 now = jiffies;
3103 if (now - scan_start <= duration) {
3104 int elapsed;
3105
3106 if (now >= scan_start)
3107 elapsed = now - scan_start;
3108 else
3109 elapsed = ULONG_MAX - scan_start + now;
3110
3111 timeout = duration - elapsed;
3112 } else {
3113 timeout = 0;
3114 }
3115
3116 queue_delayed_work(hdev->req_workqueue,
3117 &hdev->le_scan_disable, timeout);
3118
3119unlock:
3120 hci_dev_unlock(hdev);
3121}
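
/* Worked example (illustrative only, assuming HZ == 1000 so one jiffy is
 * 1 ms): if the original scan duration was 10240 jiffies and the restart
 * happens 4000 jiffies after scan_start, then elapsed = 4000 and the
 * le_scan_disable work is re-queued with timeout = 10240 - 4000 = 6240
 * jiffies, so the total scan time still matches the requested duration.
 * If the restart happens after the duration has already passed, timeout
 * is 0 and the disable work runs immediately.
 */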
3122
Johan Hedberge68f0722015-11-11 08:30:30 +02003123static int active_scan(struct hci_request *req, unsigned long opt)
3124{
3125 uint16_t interval = opt;
3126 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02003127 u8 own_addr_type;
Marcel Holtmann849c9c32020-04-09 08:05:48 +02003128 /* White list is not used for discovery */
3129 u8 filter_policy = 0x00;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05303130 /* Discovery doesn't require controller address resolution */
3131 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02003132 int err;
3133
Howard Chung22fbcfc2020-11-11 15:02:19 +08003134 bt_dev_dbg(hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02003135
Johan Hedberge68f0722015-11-11 08:30:30 +02003136 /* If controller is scanning, it means the background scanning is
3137 * running. Thus, we should temporarily stop it in order to set the
3138 * discovery scanning parameters.
3139 */
Howard Chung422bb172020-11-26 12:22:23 +08003140 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303141 hci_req_add_le_scan_disable(req, false);
Howard Chung422bb172020-11-26 12:22:23 +08003142 cancel_interleave_scan(hdev);
3143 }
Johan Hedberge68f0722015-11-11 08:30:30 +02003144
3145 /* All active scans will be done with either a resolvable private
3146 * address (when privacy feature has been enabled) or non-resolvable
3147 * private address.
3148 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02003149 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3150 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02003151 if (err < 0)
3152 own_addr_type = ADDR_LE_DEV_PUBLIC;
3153
Alain Michaudd4edda02020-06-29 17:04:15 +00003154 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3155 hdev->le_scan_window_discovery, own_addr_type,
Marcel Holtmanne1d57232020-07-23 18:08:57 +05303156 filter_policy, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02003157 return 0;
3158}
3159
3160static int interleaved_discov(struct hci_request *req, unsigned long opt)
3161{
3162 int err;
3163
Howard Chung22fbcfc2020-11-11 15:02:19 +08003164 bt_dev_dbg(req->hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02003165
3166 err = active_scan(req, opt);
3167 if (err)
3168 return err;
3169
Johan Hedberg7df26b52015-11-11 12:24:21 +02003170 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02003171}
3172
3173static void start_discovery(struct hci_dev *hdev, u8 *status)
3174{
3175 unsigned long timeout;
3176
Howard Chung22fbcfc2020-11-11 15:02:19 +08003177 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
Johan Hedberge68f0722015-11-11 08:30:30 +02003178
3179 switch (hdev->discovery.type) {
3180 case DISCOV_TYPE_BREDR:
3181 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02003182 hci_req_sync(hdev, bredr_inquiry,
3183 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02003184 status);
3185 return;
3186 case DISCOV_TYPE_INTERLEAVED:
 3187		/* When running simultaneous discovery, the LE scanning time
 3188		 * should occupy the whole discovery time since BR/EDR inquiry
 3189		 * and LE scanning are scheduled by the controller.
 3190		 *
 3191		 * For interleaved discovery, in comparison, BR/EDR inquiry
 3192		 * and LE scanning are done sequentially with separate
 3193		 * timeouts.
 3194		 */
3195 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3196 &hdev->quirks)) {
3197 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3198 /* During simultaneous discovery, we double LE scan
3199 * interval. We must leave some time for the controller
3200 * to do BR/EDR inquiry.
3201 */
3202 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00003203 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02003204 status);
3205 break;
3206 }
3207
3208 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00003209 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02003210 HCI_CMD_TIMEOUT, status);
3211 break;
3212 case DISCOV_TYPE_LE:
3213 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00003214 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02003215 HCI_CMD_TIMEOUT, status);
3216 break;
3217 default:
3218 *status = HCI_ERROR_UNSPECIFIED;
3219 return;
3220 }
3221
3222 if (*status)
3223 return;
3224
Howard Chung22fbcfc2020-11-11 15:02:19 +08003225 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
Johan Hedberge68f0722015-11-11 08:30:30 +02003226
3227 /* When service discovery is used and the controller has a
3228 * strict duplicate filter, it is important to remember the
3229 * start and duration of the scan. This is required for
3230 * restarting scanning during the discovery phase.
3231 */
3232 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3233 hdev->discovery.result_filtering) {
3234 hdev->discovery.scan_start = jiffies;
3235 hdev->discovery.scan_duration = timeout;
3236 }
3237
3238 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3239 timeout);
3240}
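
/* Illustrative recap (not part of the original file): for a pure LE
 * discovery the flow above reduces to the sketch below, where the value of
 * DISCOV_LE_TIMEOUT (defined elsewhere in the stack, typically 10240 ms)
 * determines when le_scan_disable_work() stops the scan:
 *
 *	timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
 *	hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
 *		     HCI_CMD_TIMEOUT, status);
 *	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
 *			   timeout);
 */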
3241
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003242bool hci_req_stop_discovery(struct hci_request *req)
3243{
3244 struct hci_dev *hdev = req->hdev;
3245 struct discovery_state *d = &hdev->discovery;
3246 struct hci_cp_remote_name_req_cancel cp;
3247 struct inquiry_entry *e;
3248 bool ret = false;
3249
Howard Chung22fbcfc2020-11-11 15:02:19 +08003250 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003251
3252 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3253 if (test_bit(HCI_INQUIRY, &hdev->flags))
3254 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3255
3256 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3257 cancel_delayed_work(&hdev->le_scan_disable);
Sonny Sasakac06632a2021-03-15 10:30:59 -07003258 cancel_delayed_work(&hdev->le_scan_restart);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303259 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003260 }
3261
3262 ret = true;
3263 } else {
3264 /* Passive scanning */
3265 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05303266 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003267 ret = true;
3268 }
3269 }
3270
3271 /* No further actions needed for LE-only discovery */
3272 if (d->type == DISCOV_TYPE_LE)
3273 return ret;
3274
3275 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3276 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3277 NAME_PENDING);
3278 if (!e)
3279 return ret;
3280
3281 bacpy(&cp.bdaddr, &e->data.bdaddr);
3282 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3283 &cp);
3284 ret = true;
3285 }
3286
3287 return ret;
3288}
3289
3290static int stop_discovery(struct hci_request *req, unsigned long opt)
3291{
3292 hci_dev_lock(req->hdev);
3293 hci_req_stop_discovery(req);
3294 hci_dev_unlock(req->hdev);
3295
3296 return 0;
3297}
3298
Johan Hedberge68f0722015-11-11 08:30:30 +02003299static void discov_update(struct work_struct *work)
3300{
3301 struct hci_dev *hdev = container_of(work, struct hci_dev,
3302 discov_update);
3303 u8 status = 0;
3304
3305 switch (hdev->discovery.state) {
3306 case DISCOVERY_STARTING:
3307 start_discovery(hdev, &status);
3308 mgmt_start_discovery_complete(hdev, status);
3309 if (status)
3310 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3311 else
3312 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3313 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003314 case DISCOVERY_STOPPING:
3315 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3316 mgmt_stop_discovery_complete(hdev, status);
3317 if (!status)
3318 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3319 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02003320 case DISCOVERY_STOPPED:
3321 default:
3322 return;
3323 }
3324}
3325
Johan Hedbergc366f552015-11-23 15:43:06 +02003326static void discov_off(struct work_struct *work)
3327{
3328 struct hci_dev *hdev = container_of(work, struct hci_dev,
3329 discov_off.work);
3330
Howard Chung22fbcfc2020-11-11 15:02:19 +08003331 bt_dev_dbg(hdev, "");
Johan Hedbergc366f552015-11-23 15:43:06 +02003332
3333 hci_dev_lock(hdev);
3334
 3335	/* When the discoverable timeout triggers, just make sure
 3336	 * the limited discoverable flag is cleared. Even in the case
 3337	 * of a timeout triggered from general discoverable mode, it is
 3338	 * safe to unconditionally clear the flag.
 3339	 */
3340 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3341 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3342 hdev->discov_timeout = 0;
3343
3344 hci_dev_unlock(hdev);
3345
3346 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3347 mgmt_new_settings(hdev);
3348}
3349
Johan Hedberg2ff13892015-11-25 16:15:44 +02003350static int powered_update_hci(struct hci_request *req, unsigned long opt)
3351{
3352 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02003353 u8 link_sec;
3354
3355 hci_dev_lock(hdev);
3356
3357 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3358 !lmp_host_ssp_capable(hdev)) {
3359 u8 mode = 0x01;
3360
3361 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3362
3363 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3364 u8 support = 0x01;
3365
3366 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3367 sizeof(support), &support);
3368 }
3369 }
3370
3371 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3372 lmp_bredr_capable(hdev)) {
3373 struct hci_cp_write_le_host_supported cp;
3374
3375 cp.le = 0x01;
3376 cp.simul = 0x00;
3377
3378 /* Check first if we already have the right
3379 * host state (host features set)
3380 */
3381 if (cp.le != lmp_host_le_capable(hdev) ||
3382 cp.simul != lmp_host_le_br_capable(hdev))
3383 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3384 sizeof(cp), &cp);
3385 }
3386
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003387 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02003388 /* Make sure the controller has a good default for
3389 * advertising data. This also applies to the case
3390 * where BR/EDR was toggled during the AUTO_OFF phase.
3391 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003392 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3393 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303394 int err;
3395
3396 if (ext_adv_capable(hdev)) {
3397 err = __hci_req_setup_ext_adv_instance(req,
3398 0x00);
3399 if (!err)
3400 __hci_req_update_scan_rsp_data(req,
3401 0x00);
3402 } else {
3403 err = 0;
3404 __hci_req_update_adv_data(req, 0x00);
3405 __hci_req_update_scan_rsp_data(req, 0x00);
3406 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003407
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303408 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303409 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303410 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05303411 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03003412 __hci_req_enable_ext_advertising(req,
3413 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05303414 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003415 } else if (!list_empty(&hdev->adv_instances)) {
3416 struct adv_info *adv_instance;
3417
Johan Hedberg2ff13892015-11-25 16:15:44 +02003418 adv_instance = list_first_entry(&hdev->adv_instances,
3419 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02003420 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003421 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02003422 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02003423 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02003424 }
3425
3426 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3427 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3428 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3429 sizeof(link_sec), &link_sec);
3430
3431 if (lmp_bredr_capable(hdev)) {
3432 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3433 __hci_req_write_fast_connectable(req, true);
3434 else
3435 __hci_req_write_fast_connectable(req, false);
3436 __hci_req_update_scan(req);
3437 __hci_req_update_class(req);
3438 __hci_req_update_name(req);
3439 __hci_req_update_eir(req);
3440 }
3441
3442 hci_dev_unlock(hdev);
3443 return 0;
3444}
3445
3446int __hci_req_hci_power_on(struct hci_dev *hdev)
3447{
 3448	/* Register the available SMP channels (BR/EDR and LE) only when
 3449	 * the controller has been powered on successfully. This late
 3450	 * registration is required so that LE SMP can clearly decide
 3451	 * whether the public address or the static address is used.
 3452	 */
3453 smp_register(hdev);
3454
3455 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3456 NULL);
3457}
3458
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003459void hci_request_setup(struct hci_dev *hdev)
3460{
Johan Hedberge68f0722015-11-11 08:30:30 +02003461 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003462 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003463 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003464 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003465 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02003466 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003467 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3468 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02003469 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Howard Chungc4f1f402020-11-26 12:22:21 +08003470 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003471}
3472
3473void hci_request_cancel_all(struct hci_dev *hdev)
3474{
Johan Hedberg7df0f732015-11-12 15:15:00 +02003475 hci_req_sync_cancel(hdev, ENODEV);
3476
Johan Hedberge68f0722015-11-11 08:30:30 +02003477 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02003478 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02003479 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03003480 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03003481 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02003482 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02003483 cancel_delayed_work_sync(&hdev->le_scan_disable);
3484 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02003485
3486 if (hdev->adv_instance_timeout) {
3487 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3488 hdev->adv_instance_timeout = 0;
3489 }
Howard Chungc4f1f402020-11-26 12:22:21 +08003490
3491 cancel_interleave_scan(hdev);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02003492}
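
/* Illustrative note (an assumption about the callers, which live outside
 * this file): hci_request_setup() is expected to run once when the hci_dev
 * is created, and hci_request_cancel_all() whenever the device is being
 * shut down, so that none of the work items initialized above can fire on
 * a dead controller. A hypothetical caller in the core would look roughly
 * like:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	hci_request_setup(hdev);
 *	...
 *	hci_request_cancel_all(hdev);
 */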