/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

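/* Example (illustrative sketch, not part of this file): issuing a
 * synchronous command with __hci_cmd_sync() and consuming the returned
 * event skb. The opcode and timeout shown are just one plausible pairing.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data now holds the struct hci_rp_read_local_version reply)
 *	kfree_skb(skb);
 */
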
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

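/* Example (illustrative sketch, not part of this file): running a request
 * through hci_req_sync() with a builder callback. The builder name and the
 * command it queues are placeholders chosen for the example; opt is the
 * opaque value hci_req_sync() forwards to the builder.
 *
 *	static int example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__le16 policy = cpu_to_le16((u16)opt);
 *
 *		hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, example_req, 0x000f, HCI_CMD_TIMEOUT,
 *			       &status);
 */
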
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

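/* Worked example (for illustration only): for HCI_OP_LE_SET_SCAN_ENABLE
 * (opcode 0x200c) with the two parameter bytes { 0x01, 0x00 }, the skb
 * built above carries the little-endian command header followed by the
 * parameters:
 *
 *	0c 20 02 01 00
 *	^^^^^ ^^ ^^^^^
 *	opcode, plen, parameters
 */
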
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

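/* Example (illustrative sketch, not part of this file): the typical
 * build-then-run pattern for an asynchronous request. Allocation errors
 * during building are recorded in req->err and reported by hci_req_run(),
 * so intermediate hci_req_add() calls need no individual error checks.
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_run(&req, NULL);	(or pass a completion callback)
 */
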
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

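/* Unit check (for illustration only): page scan interval and window are
 * expressed in baseband slots of 0.625 ms, so the fast-connectable value
 * used above works out as
 *
 *	0x0100 slots = 256 * 0.625 ms = 160 ms
 */
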
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started by this call,
 * false otherwise.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

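/* Illustrative timeline (informal, derived from the states used above):
 * once started, the interleave_scan delayed work alternates between the
 * two scanning modes,
 *
 *	|<- allowlist scan ->|<- no-filter scan ->|<- allowlist scan ->| ...
 *	 INTERLEAVE_SCAN_ALLOWLIST   INTERLEAVE_SCAN_NO_FILTER
 *
 * so monitored advertisers outside the allowlist are still observed part
 * of the time while the overall scan duty cycle (and power draw) stays
 * bounded.
 */
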
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices
		 * to be scanned for and no ADV monitors, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

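/* Worked example (for illustration only): each EIR field is encoded as
 * { length, type, data... } where length covers the type byte plus the
 * data. A device named "BlueZ" advertising a complete local name would
 * therefore contribute the field
 *
 *	06 09 42 6c 75 65 5a
 *	^^ ^^ ^^^^^^^^^^^^^^
 *	len=6, EIR_NAME_COMPLETE (0x09), "BlueZ"
 */
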
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list cannot be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list cannot be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

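/* Summary (for illustration only) of the filter policy values produced
 * above and consumed by hci_req_add_le_passive_scan() below:
 *
 *	0x00	accept all advertising (white list not usable)
 *	0x01	accept only advertisers on the controller white list
 *
 * hci_req_add_le_passive_scan() may additionally OR in 0x02 to select the
 * extended filter policies that also accept directed advertising sent to
 * our resolvable private address.
 */
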
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

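/* Unit check (for illustration only): LE scan interval and window are
 * expressed in units of 0.625 ms. For an example window/interval pair of
 * 0x0012/0x0060 the controller listens for
 *
 *	0x0012 * 0.625 ms = 11.25 ms out of every 0x0060 * 0.625 ms = 60 ms
 *
 * i.e. a scan duty cycle of 18.75%.
 */
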
/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller-based address resolution to be able to reconfigure the
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return false;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
		memset(&f, 0, sizeof(f));
		f.flt_type = HCI_FLT_CLEAR_ALL;
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
	}
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan && !scanning) {
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	} else if (!scan && scanning) {
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
					      bool enable)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_req_add_set_filter_enable(req, enable);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && !enable)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

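/* Informal overview of the state machine driven by the handler below
 * (BT_SUSPEND_DISCONNECT and BT_SUSPEND_CONFIGURE_WAKE appear in the
 * code; BT_RUNNING is the assumed name of the resume state):
 *
 *	BT_RUNNING --> BT_SUSPEND_DISCONNECT --> BT_SUSPEND_CONFIGURE_WAKE
 *	     ^                                               |
 *	     +--------------------- resume ------------------+
 *
 * Going down: pause discovery and advertising, disable page scan and LE
 * scan, then soft-disconnect everything. Configure-wake: program event
 * filters and a low duty cycle passive scan for wakeup-capable devices.
 * Resume: clear filters and restore scan, advertising and discovery state.
 */
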
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;

		/* Disable page scan if enabled */
		if (test_bit(HCI_PSCAN, &hdev->flags)) {
			page_scan = SCAN_DISABLED;
			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
				    &page_scan);
			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		}

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Disable advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, false);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		__hci_update_background_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		/* Clear any event filters and restore scan state */
		hci_req_clear_event_filter(&req);
		__hci_req_update_scan(&req);

		/* Reset passive/background scanning to normal */
		__hci_update_background_scan(&req);
		/* Enable all of the advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, true);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}
}

1434static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1435{
1436 u32 flags;
1437 struct adv_info *adv_instance;
1438
1439 if (instance == 0x00) {
1440 /* Instance 0 always manages the "Tx Power" and "Flags"
1441 * fields
1442 */
1443 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1444
1445 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1446 * corresponds to the "connectable" instance flag.
1447 */
1448 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1449 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1450
Johan Hedberg6a19cc82016-03-11 09:56:32 +02001451 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1452 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1453 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedbergd43efbd2016-03-09 17:30:33 +02001454 flags |= MGMT_ADV_FLAG_DISCOV;
1455
Johan Hedbergf2252572015-11-18 12:49:20 +02001456 return flags;
1457 }
1458
1459 adv_instance = hci_find_adv_instance(hdev, instance);
1460
1461 	/* Return 0 when given an invalid instance identifier. */
1462 if (!adv_instance)
1463 return 0;
1464
1465 return adv_instance->flags;
1466}
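
/* Illustration (hypothetical configuration, not from the original
 * source): for instance 0 on a connectable, general-discoverable
 * controller the returned mask is
 *
 *	MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS |
 *	MGMT_ADV_FLAG_CONNECTABLE | MGMT_ADV_FLAG_DISCOV
 *
 * while any other instance simply reports the flags stored in its
 * struct adv_info.
 */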
1467
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001468static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1469{
1470 /* If privacy is not enabled don't use RPA */
1471 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1472 return false;
1473
1474 /* If basic privacy mode is enabled use RPA */
1475 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1476 return true;
1477
1478 /* If limited privacy mode is enabled don't use RPA if we're
1479 * both discoverable and bondable.
1480 */
1481 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1482 hci_dev_test_flag(hdev, HCI_BONDABLE))
1483 return false;
1484
1485 /* We're neither bondable nor discoverable in the limited
1486 * privacy mode, therefore use RPA.
1487 */
1488 return true;
1489}
1490
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001491static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1492{
1493 /* If there is no connection we are OK to advertise. */
1494 if (hci_conn_num(hdev, LE_LINK) == 0)
1495 return true;
1496
1497 	/* Check le_states if there is any connection in the slave role. */
1498 if (hdev->conn_hash.le_num_slave > 0) {
1499 		/* Slave connection state and non-connectable mode bit 20. */
1500 if (!connectable && !(hdev->le_states[2] & 0x10))
1501 return false;
1502
1503 /* Slave connection state and connectable mode bit 38
1504 * and scannable bit 21.
1505 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001506 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1507 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001508 return false;
1509 }
1510
1511 	/* Check le_states if there is any connection in the master role. */
1512 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1513 		/* Master connection state and non-connectable mode bit 18. */
1514 if (!connectable && !(hdev->le_states[2] & 0x02))
1515 return false;
1516
1517 /* Master connection state and connectable mode bit 35 and
1518 * scannable 19.
1519 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +01001520 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001521 !(hdev->le_states[2] & 0x08)))
1522 return false;
1523 }
1524
1525 return true;
1526}
1527
Johan Hedbergf2252572015-11-18 12:49:20 +02001528void __hci_req_enable_advertising(struct hci_request *req)
1529{
1530 struct hci_dev *hdev = req->hdev;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001531 struct adv_info *adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001532 struct hci_cp_le_set_adv_param cp;
1533 u8 own_addr_type, enable = 0x01;
1534 bool connectable;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301535 u16 adv_min_interval, adv_max_interval;
Johan Hedbergf2252572015-11-18 12:49:20 +02001536 u32 flags;
1537
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001538 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001539 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001540
1541 /* If the "connectable" instance flag was not set, then choose between
1542 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1543 */
1544 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1545 mgmt_get_connectable(hdev);
1546
1547 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001548 return;
1549
1550 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1551 __hci_req_disable_advertising(req);
1552
1553 	/* Clear the HCI_LE_ADV bit temporarily so that
1554 	 * hci_update_random_address() knows that it's safe to go ahead
1555 * and write a new random address. The flag will be set back on
1556 * as soon as the SET_ADV_ENABLE HCI command completes.
1557 */
1558 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1559
Johan Hedbergf2252572015-11-18 12:49:20 +02001560 /* Set require_privacy to true only when non-connectable
1561 * advertising is used. In that case it is fine to use a
1562 * non-resolvable private address.
1563 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001564 if (hci_update_random_address(req, !connectable,
1565 adv_use_rpa(hdev, flags),
1566 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001567 return;
1568
1569 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001570
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001571 if (adv_instance) {
1572 adv_min_interval = adv_instance->min_interval;
1573 adv_max_interval = adv_instance->max_interval;
1574 } else {
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301575 adv_min_interval = hdev->le_adv_min_interval;
1576 adv_max_interval = hdev->le_adv_max_interval;
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001577 }
1578
1579 if (connectable) {
1580 cp.type = LE_ADV_IND;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301581 } else {
Luiz Augusto von Dentzaeeae472020-11-13 16:44:34 -08001582 if (adv_cur_instance_is_scannable(hdev))
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301583 cp.type = LE_ADV_SCAN_IND;
1584 else
1585 cp.type = LE_ADV_NONCONN_IND;
1586
1587 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1588 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1589 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1590 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
Spoorthi Ravishankar Koppadad4a6792019-07-15 17:05:22 +05301591 }
1592 }
1593
1594 cp.min_interval = cpu_to_le16(adv_min_interval);
1595 cp.max_interval = cpu_to_le16(adv_max_interval);
Johan Hedbergf2252572015-11-18 12:49:20 +02001596 cp.own_address_type = own_addr_type;
1597 cp.channel_map = hdev->le_adv_channel_map;
1598
1599 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1600
1601 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1602}
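
/* Usage sketch (illustrative; the same pattern appears in
 * hci_req_reenable_advertising() below): legacy advertising is
 * normally (re)enabled as part of a larger request so that data,
 * scan response and parameters are queued together:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	__hci_req_update_adv_data(&req, 0x00);
 *	__hci_req_update_scan_rsp_data(&req, 0x00);
 *	__hci_req_enable_advertising(&req);
 *	hci_req_run(&req, NULL);
 *
 * hdev is assumed to be a powered controller with LE enabled.
 */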
1603
Michał Narajowskif61851f2016-10-19 10:20:27 +02001604u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001605{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001606 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001607 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001608
Michał Narajowskif61851f2016-10-19 10:20:27 +02001609 /* no space left for name (+ NULL + type + len) */
1610 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1611 return ad_len;
1612
1613 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001614 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001615 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001616 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001617 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001618
Michał Narajowskif61851f2016-10-19 10:20:27 +02001619 /* use short name if present */
1620 short_len = strlen(hdev->short_name);
1621 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001622 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001623 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001624
Michał Narajowskif61851f2016-10-19 10:20:27 +02001625 	/* use shortened full name if present; we already know that the name
1626 	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1627 */
1628 if (complete_len) {
1629 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1630
1631 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1632 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1633
1634 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1635 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001636 }
1637
1638 return ad_len;
1639}
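
/* Worked example (hypothetical name, assuming eir_append_data() emits
 * a length byte covering type + data): with hdev->dev_name = "kernel"
 * the complete-name branch appends
 *
 *	0x08, EIR_NAME_COMPLETE, 'k', 'e', 'r', 'n', 'e', 'l', '\0'
 *
 * i.e. the NUL terminator is part of the payload, which is what the
 * "(+ NULL + type + len)" space check above accounts for.
 */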
1640
Michał Narajowski1b422062016-10-05 12:28:27 +02001641static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1642{
1643 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1644}
1645
Michał Narajowski7c295c42016-09-18 12:50:02 +02001646static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1647{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001648 u8 scan_rsp_len = 0;
1649
1650 	if (hdev->appearance)
Michał Narajowski1b422062016-10-05 12:28:27 +02001651 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1653
Michał Narajowski1b422062016-10-05 12:28:27 +02001654 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001655}
1656
Johan Hedbergf2252572015-11-18 12:49:20 +02001657static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1658 u8 *ptr)
1659{
1660 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001661 u32 instance_flags;
1662 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001663
1664 adv_instance = hci_find_adv_instance(hdev, instance);
1665 if (!adv_instance)
1666 return 0;
1667
Michał Narajowski7c295c42016-09-18 12:50:02 +02001668 instance_flags = adv_instance->flags;
1669
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001670 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
Michał Narajowski1b422062016-10-05 12:28:27 +02001671 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1673
Michał Narajowski1b422062016-10-05 12:28:27 +02001674 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001675 adv_instance->scan_rsp_len);
1676
Michał Narajowski7c295c42016-09-18 12:50:02 +02001677 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001678
1679 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1680 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1681
1682 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001683}
1684
Johan Hedbergcab054a2015-11-30 11:21:45 +02001685void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001686{
1687 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001688 u8 len;
1689
1690 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1691 return;
1692
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301693 if (ext_adv_capable(hdev)) {
1694 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001695
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301696 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001697
Luiz Augusto von Dentza76a0d32020-11-13 16:44:33 -08001698 if (instance)
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301699 len = create_instance_scan_rsp_data(hdev, instance,
1700 cp.data);
1701 else
Luiz Augusto von Dentza76a0d32020-11-13 16:44:33 -08001702 len = create_default_scan_rsp_data(hdev, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001703
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301704 if (hdev->scan_rsp_data_len == len &&
1705 !memcmp(cp.data, hdev->scan_rsp_data, len))
1706 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001707
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301708 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1709 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001710
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001711 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301712 cp.length = len;
1713 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1714 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1715
1716 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1717 &cp);
1718 } else {
1719 struct hci_cp_le_set_scan_rsp_data cp;
1720
1721 memset(&cp, 0, sizeof(cp));
1722
1723 if (instance)
1724 len = create_instance_scan_rsp_data(hdev, instance,
1725 cp.data);
1726 else
1727 len = create_default_scan_rsp_data(hdev, cp.data);
1728
1729 if (hdev->scan_rsp_data_len == len &&
1730 !memcmp(cp.data, hdev->scan_rsp_data, len))
1731 return;
1732
1733 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1734 hdev->scan_rsp_data_len = len;
1735
1736 cp.length = len;
1737
1738 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1739 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001740}
1741
Johan Hedbergf2252572015-11-18 12:49:20 +02001742static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1743{
1744 struct adv_info *adv_instance = NULL;
1745 u8 ad_len = 0, flags = 0;
1746 u32 instance_flags;
1747
1748 /* Return 0 when the current instance identifier is invalid. */
1749 if (instance) {
1750 adv_instance = hci_find_adv_instance(hdev, instance);
1751 if (!adv_instance)
1752 return 0;
1753 }
1754
1755 instance_flags = get_adv_instance_flags(hdev, instance);
1756
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001757 	/* If the instance data already contains the flags, skip adding
1758 	 * them again.
1759 */
1760 if (adv_instance && eir_get_data(adv_instance->adv_data,
1761 adv_instance->adv_data_len, EIR_FLAGS,
1762 NULL))
1763 goto skip_flags;
1764
Johan Hedbergf2252572015-11-18 12:49:20 +02001765 /* The Add Advertising command allows userspace to set both the general
1766 * and limited discoverable flags.
1767 */
1768 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1769 flags |= LE_AD_GENERAL;
1770
1771 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1772 flags |= LE_AD_LIMITED;
1773
Johan Hedbergf18ba582016-04-06 13:09:05 +03001774 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1775 flags |= LE_AD_NO_BREDR;
1776
Johan Hedbergf2252572015-11-18 12:49:20 +02001777 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1778 /* If a discovery flag wasn't provided, simply use the global
1779 * settings.
1780 */
1781 if (!flags)
1782 flags |= mgmt_get_adv_discov_flags(hdev);
1783
Johan Hedbergf2252572015-11-18 12:49:20 +02001784 /* If flags would still be empty, then there is no need to
1785 		 * include the "Flags" AD field.
1786 */
1787 if (flags) {
1788 ptr[0] = 0x02;
1789 ptr[1] = EIR_FLAGS;
1790 ptr[2] = flags;
1791
1792 ad_len += 3;
1793 ptr += 3;
1794 }
1795 }
1796
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001797skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001798 if (adv_instance) {
1799 memcpy(ptr, adv_instance->adv_data,
1800 adv_instance->adv_data_len);
1801 ad_len += adv_instance->adv_data_len;
1802 ptr += adv_instance->adv_data_len;
1803 }
1804
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301805 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1806 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001807
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301808 if (ext_adv_capable(hdev)) {
1809 if (adv_instance)
1810 adv_tx_power = adv_instance->tx_power;
1811 else
1812 adv_tx_power = hdev->adv_tx_power;
1813 } else {
1814 adv_tx_power = hdev->adv_tx_power;
1815 }
1816
1817 /* Provide Tx Power only if we can provide a valid value for it */
1818 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1819 ptr[0] = 0x02;
1820 ptr[1] = EIR_TX_POWER;
1821 ptr[2] = (u8)adv_tx_power;
1822
1823 ad_len += 3;
1824 ptr += 3;
1825 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001826 }
1827
1828 return ad_len;
1829}
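
/* Illustration (hypothetical instance): for a general-discoverable,
 * LE-only instance with no instance data, the buffer built above
 * starts with a single Flags AD structure
 *
 *	0x02, EIR_FLAGS, LE_AD_GENERAL | LE_AD_NO_BREDR
 *
 * and a Tx Power structure (0x02, EIR_TX_POWER, <dBm>) follows only
 * when MGMT_ADV_FLAG_TX_POWER is set and a valid value is known.
 */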
1830
Johan Hedbergcab054a2015-11-30 11:21:45 +02001831void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001832{
1833 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001834 u8 len;
1835
1836 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1837 return;
1838
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301839 if (ext_adv_capable(hdev)) {
1840 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001841
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301842 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001843
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301844 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001845
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301846 /* There's nothing to do if the data hasn't changed */
1847 if (hdev->adv_data_len == len &&
1848 memcmp(cp.data, hdev->adv_data, len) == 0)
1849 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001850
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301851 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1852 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001853
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301854 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001855 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301856 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1857 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1858
1859 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1860 } else {
1861 struct hci_cp_le_set_adv_data cp;
1862
1863 memset(&cp, 0, sizeof(cp));
1864
1865 len = create_instance_adv_data(hdev, instance, cp.data);
1866
1867 /* There's nothing to do if the data hasn't changed */
1868 if (hdev->adv_data_len == len &&
1869 memcmp(cp.data, hdev->adv_data, len) == 0)
1870 return;
1871
1872 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1873 hdev->adv_data_len = len;
1874
1875 cp.length = len;
1876
1877 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1878 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001879}
1880
Johan Hedbergcab054a2015-11-30 11:21:45 +02001881int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001882{
1883 struct hci_request req;
1884
1885 hci_req_init(&req, hdev);
1886 __hci_req_update_adv_data(&req, instance);
1887
1888 return hci_req_run(&req, NULL);
1889}
1890
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05301891static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1892 u16 opcode)
1893{
1894 	bt_dev_dbg(hdev, "status %u", status);
1895}
1896
1897void hci_req_disable_address_resolution(struct hci_dev *hdev)
1898{
1899 struct hci_request req;
1900 __u8 enable = 0x00;
1901
1902 if (!use_ll_privacy(hdev) &&
1903 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1904 return;
1905
1906 hci_req_init(&req, hdev);
1907
1908 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1909
1910 hci_req_run(&req, enable_addr_resolution_complete);
1911}
1912
Johan Hedbergf2252572015-11-18 12:49:20 +02001913static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1914{
Howard Chung22fbcfc2020-11-11 15:02:19 +08001915 bt_dev_dbg(hdev, "status %u", status);
Johan Hedbergf2252572015-11-18 12:49:20 +02001916}
1917
1918void hci_req_reenable_advertising(struct hci_dev *hdev)
1919{
1920 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001921
1922 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001923 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001924 return;
1925
Johan Hedbergf2252572015-11-18 12:49:20 +02001926 hci_req_init(&req, hdev);
1927
Johan Hedbergcab054a2015-11-30 11:21:45 +02001928 if (hdev->cur_adv_instance) {
1929 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1930 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001931 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301932 if (ext_adv_capable(hdev)) {
1933 __hci_req_start_ext_adv(&req, 0x00);
1934 } else {
1935 __hci_req_update_adv_data(&req, 0x00);
1936 __hci_req_update_scan_rsp_data(&req, 0x00);
1937 __hci_req_enable_advertising(&req);
1938 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001939 }
1940
1941 hci_req_run(&req, adv_enable_complete);
1942}
1943
1944static void adv_timeout_expire(struct work_struct *work)
1945{
1946 struct hci_dev *hdev = container_of(work, struct hci_dev,
1947 adv_instance_expire.work);
1948
1949 struct hci_request req;
1950 u8 instance;
1951
Howard Chung22fbcfc2020-11-11 15:02:19 +08001952 bt_dev_dbg(hdev, "");
Johan Hedbergf2252572015-11-18 12:49:20 +02001953
1954 hci_dev_lock(hdev);
1955
1956 hdev->adv_instance_timeout = 0;
1957
Johan Hedbergcab054a2015-11-30 11:21:45 +02001958 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001959 if (instance == 0x00)
1960 goto unlock;
1961
1962 hci_req_init(&req, hdev);
1963
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001964 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001965
1966 if (list_empty(&hdev->adv_instances))
1967 __hci_req_disable_advertising(&req);
1968
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001969 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001970
1971unlock:
1972 hci_dev_unlock(hdev);
1973}
1974
Howard Chungc4f1f402020-11-26 12:22:21 +08001975static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1976 unsigned long opt)
1977{
1978 struct hci_dev *hdev = req->hdev;
1979 int ret = 0;
1980
1981 hci_dev_lock(hdev);
1982
1983 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1984 hci_req_add_le_scan_disable(req, false);
1985 hci_req_add_le_passive_scan(req);
1986
1987 switch (hdev->interleave_scan_state) {
1988 case INTERLEAVE_SCAN_ALLOWLIST:
1989 bt_dev_dbg(hdev, "next state: allowlist");
1990 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1991 break;
1992 case INTERLEAVE_SCAN_NO_FILTER:
1993 bt_dev_dbg(hdev, "next state: no filter");
1994 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1995 break;
1996 case INTERLEAVE_SCAN_NONE:
1997 		bt_dev_err(hdev, "unexpected interleave scan state");
1998 ret = -1;
1999 }
2000
2001 hci_dev_unlock(hdev);
2002
2003 return ret;
2004}
2005
2006static void interleave_scan_work(struct work_struct *work)
2007{
2008 struct hci_dev *hdev = container_of(work, struct hci_dev,
2009 interleave_scan.work);
2010 u8 status;
2011 unsigned long timeout;
2012
2013 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2014 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2015 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2016 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2017 } else {
2018 		bt_dev_err(hdev, "unexpected interleave scan state");
2019 return;
2020 }
2021
2022 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2023 HCI_CMD_TIMEOUT, &status);
2024
2025 /* Don't continue interleaving if it was canceled */
2026 if (is_interleave_scanning(hdev))
2027 queue_delayed_work(hdev->req_workqueue,
2028 &hdev->interleave_scan, timeout);
2029}
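
/* Resulting schedule (sketch; both durations are per-hdev tunables):
 *
 *	|<- advmon_allowlist_duration ->|<- advmon_no_filter_duration ->| ...
 *	  passive scan, allowlist only     passive scan, no filtering
 *
 * Each pass restarts passive scanning with the other filter policy and
 * re-queues itself until interleaving is cancelled.
 */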
2030
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302031int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2032 bool use_rpa, struct adv_info *adv_instance,
2033 u8 *own_addr_type, bdaddr_t *rand_addr)
2034{
2035 int err;
2036
2037 bacpy(rand_addr, BDADDR_ANY);
2038
2039 	/* If privacy is enabled, use a resolvable private address. If the
2040 	 * current RPA has expired, generate a new one.
2041 */
2042 if (use_rpa) {
2043 int to;
2044
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05302045 		/* If the controller supports LL Privacy, use own address type
2046 		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2047 		 */
2048 if (use_ll_privacy(hdev))
2049 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2050 else
2051 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302052
2053 if (adv_instance) {
2054 if (!adv_instance->rpa_expired &&
2055 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2056 return 0;
2057
2058 adv_instance->rpa_expired = false;
2059 } else {
2060 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2061 !bacmp(&hdev->random_addr, &hdev->rpa))
2062 return 0;
2063 }
2064
2065 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2066 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01002067 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302068 return err;
2069 }
2070
2071 bacpy(rand_addr, &hdev->rpa);
2072
2073 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2074 if (adv_instance)
2075 queue_delayed_work(hdev->workqueue,
2076 &adv_instance->rpa_expired_cb, to);
2077 else
2078 queue_delayed_work(hdev->workqueue,
2079 &hdev->rpa_expired, to);
2080
2081 return 0;
2082 }
2083
2084 /* In case of required privacy without resolvable private address,
2085 	 * use a non-resolvable private address. This is useful for
2086 * non-connectable advertising.
2087 */
2088 if (require_privacy) {
2089 bdaddr_t nrpa;
2090
2091 while (true) {
2092 /* The non-resolvable private address is generated
2093 			 * from six random bytes with the two most significant
2094 * bits cleared.
2095 */
2096 get_random_bytes(&nrpa, 6);
2097 nrpa.b[5] &= 0x3f;
2098
2099 /* The non-resolvable private address shall not be
2100 * equal to the public address.
2101 */
2102 if (bacmp(&hdev->bdaddr, &nrpa))
2103 break;
2104 }
2105
2106 *own_addr_type = ADDR_LE_DEV_RANDOM;
2107 bacpy(rand_addr, &nrpa);
2108
2109 return 0;
2110 }
2111
2112 /* No privacy so use a public address. */
2113 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2114
2115 return 0;
2116}
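
/* Background note (per the Core specification): the two most
 * significant bits of a random address encode its subtype - 0b00
 * non-resolvable, 0b01 resolvable (RPA), 0b11 static. Clearing the
 * top two bits of nrpa.b[5] above (the most significant byte, since
 * bdaddr_t is stored little-endian) is what makes the generated
 * address non-resolvable.
 */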
2117
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302118void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2119{
2120 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2121}
2122
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302123int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302124{
2125 struct hci_cp_le_set_ext_adv_params cp;
2126 struct hci_dev *hdev = req->hdev;
2127 bool connectable;
2128 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302129 bdaddr_t random_addr;
2130 u8 own_addr_type;
2131 int err;
2132 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302133 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302134
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302135 if (instance > 0) {
2136 adv_instance = hci_find_adv_instance(hdev, instance);
2137 if (!adv_instance)
2138 return -EINVAL;
2139 } else {
2140 adv_instance = NULL;
2141 }
2142
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302143 flags = get_adv_instance_flags(hdev, instance);
2144
2145 /* If the "connectable" instance flag was not set, then choose between
2146 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2147 */
2148 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2149 mgmt_get_connectable(hdev);
2150
Colin Ian King75edd1f2018-11-09 13:27:36 +00002151 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302152 return -EPERM;
2153
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302154 /* Set require_privacy to true only when non-connectable
2155 * advertising is used. In that case it is fine to use a
2156 * non-resolvable private address.
2157 */
2158 err = hci_get_random_address(hdev, !connectable,
2159 adv_use_rpa(hdev, flags), adv_instance,
2160 &own_addr_type, &random_addr);
2161 if (err < 0)
2162 return err;
2163
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302164 memset(&cp, 0, sizeof(cp));
2165
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08002166 if (adv_instance) {
2167 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2168 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2169 cp.tx_power = adv_instance->tx_power;
2170 } else {
2171 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2172 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2173 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2174 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302175
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302176 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2177
2178 if (connectable) {
2179 if (secondary_adv)
2180 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2181 else
2182 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Daniel Winklerff02db12021-03-03 11:15:23 -08002183 } else if (adv_instance_is_scannable(hdev, instance) ||
2184 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302185 if (secondary_adv)
2186 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2187 else
2188 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2189 } else {
2190 if (secondary_adv)
2191 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2192 else
2193 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2194 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302195
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302196 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302197 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002198 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302199
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05302200 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2201 cp.primary_phy = HCI_ADV_PHY_1M;
2202 cp.secondary_phy = HCI_ADV_PHY_2M;
2203 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2204 cp.primary_phy = HCI_ADV_PHY_CODED;
2205 cp.secondary_phy = HCI_ADV_PHY_CODED;
2206 } else {
2207 /* In all other cases use 1M */
2208 cp.primary_phy = HCI_ADV_PHY_1M;
2209 cp.secondary_phy = HCI_ADV_PHY_1M;
2210 }
2211
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302212 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2213
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302214 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2215 bacmp(&random_addr, BDADDR_ANY)) {
2216 struct hci_cp_le_set_adv_set_rand_addr cp;
2217
2218 /* Check if random address need to be updated */
2219 if (adv_instance) {
2220 if (!bacmp(&random_addr, &adv_instance->random_addr))
2221 return 0;
2222 } else {
2223 if (!bacmp(&random_addr, &hdev->random_addr))
2224 return 0;
2225 }
2226
2227 memset(&cp, 0, sizeof(cp));
2228
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07002229 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05302230 bacpy(&cp.bdaddr, &random_addr);
2231
2232 hci_req_add(req,
2233 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2234 sizeof(cp), &cp);
2235 }
2236
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302237 return 0;
2238}
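
/* Usage sketch (illustrative; this is the sequence used by
 * __hci_req_start_ext_adv() below): parameter setup is followed by
 * scan response data and an enable for the same handle:
 *
 *	err = __hci_req_setup_ext_adv_instance(req, instance);
 *	if (err < 0)
 *		return err;
 *	__hci_req_update_scan_rsp_data(req, instance);
 *	__hci_req_enable_ext_advertising(req, instance);
 */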
2239
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002240int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302241{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002242 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302243 struct hci_cp_le_set_ext_adv_enable *cp;
2244 struct hci_cp_ext_adv_set *adv_set;
2245 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002246 struct adv_info *adv_instance;
2247
2248 if (instance > 0) {
2249 adv_instance = hci_find_adv_instance(hdev, instance);
2250 if (!adv_instance)
2251 return -EINVAL;
2252 } else {
2253 adv_instance = NULL;
2254 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302255
2256 cp = (void *) data;
2257 adv_set = (void *) cp->data;
2258
2259 memset(cp, 0, sizeof(*cp));
2260
2261 cp->enable = 0x01;
2262 cp->num_of_sets = 0x01;
2263
2264 memset(adv_set, 0, sizeof(*adv_set));
2265
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002266 adv_set->handle = instance;
2267
2268 /* Set duration per instance since controller is responsible for
2269 * scheduling it.
2270 */
2271 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03002272 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002273
2274 /* Time = N * 10 ms */
2275 adv_set->duration = cpu_to_le16(duration / 10);
2276 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302277
2278 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2279 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2280 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002281
2282 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302283}
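
/* Note on the data[] buffer above: HCI_OP_LE_SET_EXT_ADV_ENABLE takes
 * a variable-length parameter block, laid out as
 *
 *	struct hci_cp_le_set_ext_adv_enable	(enable, num_of_sets)
 *	struct hci_cp_ext_adv_set		(handle, duration, ...) x N
 *
 * with N = 1 here since each call enables exactly one advertising set.
 */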
2284
Daniel Winkler37adf702020-07-14 14:16:00 -07002285int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2286{
2287 struct hci_dev *hdev = req->hdev;
2288 struct hci_cp_le_set_ext_adv_enable *cp;
2289 struct hci_cp_ext_adv_set *adv_set;
2290 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2291 u8 req_size;
2292
2293 /* If request specifies an instance that doesn't exist, fail */
2294 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2295 return -EINVAL;
2296
2297 memset(data, 0, sizeof(data));
2298
2299 cp = (void *)data;
2300 adv_set = (void *)cp->data;
2301
2302 /* Instance 0x00 indicates all advertising instances will be disabled */
2303 cp->num_of_sets = !!instance;
2304 cp->enable = 0x00;
2305
2306 adv_set->handle = instance;
2307
2308 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2309 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2310
2311 return 0;
2312}
2313
2314int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2315{
2316 struct hci_dev *hdev = req->hdev;
2317
2318 /* If request specifies an instance that doesn't exist, fail */
2319 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2320 return -EINVAL;
2321
2322 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2323
2324 return 0;
2325}
2326
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302327int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2328{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302329 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07002330 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302331 int err;
2332
Daniel Winkler37adf702020-07-14 14:16:00 -07002333 /* If instance isn't pending, the chip knows about it, and it's safe to
2334 * disable
2335 */
2336 if (adv_instance && !adv_instance->pending)
2337 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302338
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302339 err = __hci_req_setup_ext_adv_instance(req, instance);
2340 if (err < 0)
2341 return err;
2342
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302343 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002344 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302345
2346 return 0;
2347}
2348
Johan Hedbergf2252572015-11-18 12:49:20 +02002349int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2350 bool force)
2351{
2352 struct hci_dev *hdev = req->hdev;
2353 struct adv_info *adv_instance = NULL;
2354 u16 timeout;
2355
2356 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002357 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002358 return -EPERM;
2359
2360 if (hdev->adv_instance_timeout)
2361 return -EBUSY;
2362
2363 adv_instance = hci_find_adv_instance(hdev, instance);
2364 if (!adv_instance)
2365 return -ENOENT;
2366
2367 /* A zero timeout means unlimited advertising. As long as there is
2368 * only one instance, duration should be ignored. We still set a timeout
2369 * in case further instances are being added later on.
2370 *
2371 * If the remaining lifetime of the instance is more than the duration
2372 * then the timeout corresponds to the duration, otherwise it will be
2373 * reduced to the remaining instance lifetime.
2374 */
2375 if (adv_instance->timeout == 0 ||
2376 adv_instance->duration <= adv_instance->remaining_time)
2377 timeout = adv_instance->duration;
2378 else
2379 timeout = adv_instance->remaining_time;
2380
2381 /* The remaining time is being reduced unless the instance is being
2382 * advertised without time limit.
2383 */
2384 if (adv_instance->timeout)
2385 adv_instance->remaining_time =
2386 adv_instance->remaining_time - timeout;
2387
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002388 /* Only use work for scheduling instances with legacy advertising */
2389 if (!ext_adv_capable(hdev)) {
2390 hdev->adv_instance_timeout = timeout;
2391 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002392 &hdev->adv_instance_expire,
2393 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002394 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002395
2396 /* If we're just re-scheduling the same instance again then do not
2397 * execute any HCI commands. This happens when a single instance is
2398 * being advertised.
2399 */
2400 if (!force && hdev->cur_adv_instance == instance &&
2401 hci_dev_test_flag(hdev, HCI_LE_ADV))
2402 return 0;
2403
2404 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302405 if (ext_adv_capable(hdev)) {
2406 __hci_req_start_ext_adv(req, instance);
2407 } else {
2408 __hci_req_update_adv_data(req, instance);
2409 __hci_req_update_scan_rsp_data(req, instance);
2410 __hci_req_enable_advertising(req);
2411 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002412
2413 return 0;
2414}
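
/* Worked example (hypothetical numbers): an instance created with
 * timeout = 60 s and duration = 10 s is first scheduled with a 10 s
 * expiry (its duration), leaving remaining_time = 50 s. Once
 * remaining_time drops below the duration, the final round is
 * shortened to whatever lifetime is left.
 */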
2415
Johan Hedbergf2252572015-11-18 12:49:20 +02002416/* For a single instance:
2417 * - force == true: The instance will be removed even when its remaining
2418 * lifetime is not zero.
2419 * - force == false: the instance will be deactivated but kept stored unless
2420 * the remaining lifetime is zero.
2421 *
2422 * For instance == 0x00:
2423 * - force == true: All instances will be removed regardless of their timeout
2424 * setting.
2425 * - force == false: Only instances that have a timeout will be removed.
2426 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002427void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2428 struct hci_request *req, u8 instance,
2429 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002430{
2431 struct adv_info *adv_instance, *n, *next_instance = NULL;
2432 int err;
2433 u8 rem_inst;
2434
2435 /* Cancel any timeout concerning the removed instance(s). */
2436 if (!instance || hdev->cur_adv_instance == instance)
2437 cancel_adv_timeout(hdev);
2438
2439 /* Get the next instance to advertise BEFORE we remove
2440 * the current one. This can be the same instance again
2441 * if there is only one instance.
2442 */
2443 if (instance && hdev->cur_adv_instance == instance)
2444 next_instance = hci_get_next_instance(hdev, instance);
2445
2446 if (instance == 0x00) {
2447 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2448 list) {
2449 if (!(force || adv_instance->timeout))
2450 continue;
2451
2452 rem_inst = adv_instance->instance;
2453 err = hci_remove_adv_instance(hdev, rem_inst);
2454 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002455 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002456 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002457 } else {
2458 adv_instance = hci_find_adv_instance(hdev, instance);
2459
2460 if (force || (adv_instance && adv_instance->timeout &&
2461 !adv_instance->remaining_time)) {
2462 /* Don't advertise a removed instance. */
2463 if (next_instance &&
2464 next_instance->instance == instance)
2465 next_instance = NULL;
2466
2467 err = hci_remove_adv_instance(hdev, instance);
2468 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002469 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002470 }
2471 }
2472
Johan Hedbergf2252572015-11-18 12:49:20 +02002473 if (!req || !hdev_is_powered(hdev) ||
2474 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2475 return;
2476
Daniel Winkler37adf702020-07-14 14:16:00 -07002477 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002478 __hci_req_schedule_adv_instance(req, next_instance->instance,
2479 false);
2480}
2481
Johan Hedberg0857dd32014-12-19 13:40:20 +02002482static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2483{
2484 struct hci_dev *hdev = req->hdev;
2485
2486 /* If we're advertising or initiating an LE connection we can't
2487 * go ahead and change the random address at this time. This is
2488 * because the eventual initiator address used for the
2489 * subsequently created connection will be undefined (some
2490 * controllers use the new address and others the one we had
2491 * when the operation started).
2492 *
2493 * In this kind of scenario skip the update and let the random
2494 * address be updated at the next cycle.
2495 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002496 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002497 hci_lookup_le_connect(hdev)) {
Howard Chung22fbcfc2020-11-11 15:02:19 +08002498 bt_dev_dbg(hdev, "Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002499 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002500 return;
2501 }
2502
2503 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2504}
2505
2506int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002507 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002508{
2509 struct hci_dev *hdev = req->hdev;
2510 int err;
2511
2512 	/* If privacy is enabled, use a resolvable private address. If the
2513 	 * current RPA has expired or there is something other than the
2514 	 * current RPA in use, then generate a new one.
2515 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002516 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002517 int to;
2518
Sathish Narasimmand03c7592020-07-23 18:09:00 +05302519 		/* If the controller supports LL Privacy, use own address type
2520 		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2521 		 */
2522 if (use_ll_privacy(hdev))
2523 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2524 else
2525 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02002526
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002527 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002528 !bacmp(&hdev->random_addr, &hdev->rpa))
2529 return 0;
2530
2531 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2532 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002533 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002534 return err;
2535 }
2536
2537 set_random_addr(req, &hdev->rpa);
2538
2539 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2540 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2541
2542 return 0;
2543 }
2544
2545 /* In case of required privacy without resolvable private address,
2546 	 * use a non-resolvable private address. This is useful for active
2547 * scanning and non-connectable advertising.
2548 */
2549 if (require_privacy) {
2550 bdaddr_t nrpa;
2551
2552 while (true) {
2553 /* The non-resolvable private address is generated
2554 			 * from six random bytes with the two most significant
2555 * bits cleared.
2556 */
2557 get_random_bytes(&nrpa, 6);
2558 nrpa.b[5] &= 0x3f;
2559
2560 /* The non-resolvable private address shall not be
2561 * equal to the public address.
2562 */
2563 if (bacmp(&hdev->bdaddr, &nrpa))
2564 break;
2565 }
2566
2567 *own_addr_type = ADDR_LE_DEV_RANDOM;
2568 set_random_addr(req, &nrpa);
2569 return 0;
2570 }
2571
2572 /* If forcing static address is in use or there is no public
2573 * address use the static address as random address (but skip
2574 * the HCI command if the current random address is already the
2575 	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002576 *
2577 * In case BR/EDR has been disabled on a dual-mode controller
2578 * and a static address has been configured, then use that
2579 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002580 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002581 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002582 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002583 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002584 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002585 *own_addr_type = ADDR_LE_DEV_RANDOM;
2586 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2587 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2588 &hdev->static_addr);
2589 return 0;
2590 }
2591
2592 /* Neither privacy nor static address is being used so use a
2593 * public address.
2594 */
2595 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2596
2597 return 0;
2598}
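
/* Summary of the decision ladder above (own_addr_type / address used):
 *
 *	use_rpa			ADDR_LE_DEV_RANDOM(_RESOLVED), hdev->rpa
 *	require_privacy		ADDR_LE_DEV_RANDOM, fresh NRPA
 *	forced/implied static	ADDR_LE_DEV_RANDOM, hdev->static_addr
 *	otherwise		ADDR_LE_DEV_PUBLIC, hdev->bdaddr
 */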
Johan Hedberg2cf22212014-12-19 22:26:00 +02002599
Johan Hedberg405a2612014-12-19 23:18:22 +02002600static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2601{
2602 struct bdaddr_list *b;
2603
2604 list_for_each_entry(b, &hdev->whitelist, list) {
2605 struct hci_conn *conn;
2606
2607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2608 if (!conn)
2609 return true;
2610
2611 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2612 return true;
2613 }
2614
2615 return false;
2616}
2617
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002618void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002619{
2620 struct hci_dev *hdev = req->hdev;
2621 u8 scan;
2622
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002623 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002624 return;
2625
2626 if (!hdev_is_powered(hdev))
2627 return;
2628
2629 if (mgmt_powering_down(hdev))
2630 return;
2631
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002632 if (hdev->scanning_paused)
2633 return;
2634
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002635 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002636 disconnected_whitelist_entries(hdev))
2637 scan = SCAN_PAGE;
2638 else
2639 scan = SCAN_DISABLED;
2640
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002641 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002642 scan |= SCAN_INQUIRY;
2643
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002644 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2645 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2646 return;
2647
Johan Hedberg405a2612014-12-19 23:18:22 +02002648 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2649}
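
/* For reference: SCAN_PAGE makes the controller answer paging (BR/EDR
 * connectable) and SCAN_INQUIRY makes it answer inquiry (discoverable);
 * the two bits are OR-ed independently above before the single write.
 */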
2650
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002651static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002652{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002653 hci_dev_lock(req->hdev);
2654 __hci_req_update_scan(req);
2655 hci_dev_unlock(req->hdev);
2656 return 0;
2657}
Johan Hedberg405a2612014-12-19 23:18:22 +02002658
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002659static void scan_update_work(struct work_struct *work)
2660{
2661 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2662
2663 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002664}
2665
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002666static int connectable_update(struct hci_request *req, unsigned long opt)
2667{
2668 struct hci_dev *hdev = req->hdev;
2669
2670 hci_dev_lock(hdev);
2671
2672 __hci_req_update_scan(req);
2673
2674 /* If BR/EDR is not enabled and we disable advertising as a
2675 * by-product of disabling connectable, we need to update the
2676 * advertising flags.
2677 */
2678 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002679 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002680
2681 /* Update the advertising parameters if necessary */
2682 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302683 !list_empty(&hdev->adv_instances)) {
2684 if (ext_adv_capable(hdev))
2685 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2686 else
2687 __hci_req_enable_advertising(req);
2688 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002689
2690 __hci_update_background_scan(req);
2691
2692 hci_dev_unlock(hdev);
2693
2694 return 0;
2695}
2696
2697static void connectable_update_work(struct work_struct *work)
2698{
2699 struct hci_dev *hdev = container_of(work, struct hci_dev,
2700 connectable_update);
2701 u8 status;
2702
2703 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2704 mgmt_set_connectable_complete(hdev, status);
2705}
2706
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002707static u8 get_service_classes(struct hci_dev *hdev)
2708{
2709 struct bt_uuid *uuid;
2710 u8 val = 0;
2711
2712 list_for_each_entry(uuid, &hdev->uuids, list)
2713 val |= uuid->svc_hint;
2714
2715 return val;
2716}
2717
2718void __hci_req_update_class(struct hci_request *req)
2719{
2720 struct hci_dev *hdev = req->hdev;
2721 u8 cod[3];
2722
Howard Chung22fbcfc2020-11-11 15:02:19 +08002723 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002724
2725 if (!hdev_is_powered(hdev))
2726 return;
2727
2728 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2729 return;
2730
2731 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2732 return;
2733
2734 cod[0] = hdev->minor_class;
2735 cod[1] = hdev->major_class;
2736 cod[2] = get_service_classes(hdev);
2737
2738 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2739 cod[1] |= 0x20;
2740
2741 if (memcmp(cod, hdev->dev_class, 3) == 0)
2742 return;
2743
2744 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2745}
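
/* Example (hypothetical values): with minor_class = 0x0c and
 * major_class = 0x02 ("Phone") the payload becomes
 * { 0x0c, 0x02, <service class bits> }; the 0x20 OR-ed into cod[1] is
 * the Limited Discoverable bit (bit 13) of the Class of Device.
 */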
2746
Johan Hedbergaed1a882015-11-22 17:24:44 +03002747static void write_iac(struct hci_request *req)
2748{
2749 struct hci_dev *hdev = req->hdev;
2750 struct hci_cp_write_current_iac_lap cp;
2751
2752 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2753 return;
2754
2755 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2756 /* Limited discoverable mode */
2757 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2758 cp.iac_lap[0] = 0x00; /* LIAC */
2759 cp.iac_lap[1] = 0x8b;
2760 cp.iac_lap[2] = 0x9e;
2761 cp.iac_lap[3] = 0x33; /* GIAC */
2762 cp.iac_lap[4] = 0x8b;
2763 cp.iac_lap[5] = 0x9e;
2764 } else {
2765 /* General discoverable mode */
2766 cp.num_iac = 1;
2767 cp.iac_lap[0] = 0x33; /* GIAC */
2768 cp.iac_lap[1] = 0x8b;
2769 cp.iac_lap[2] = 0x9e;
2770 }
2771
2772 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2773 (cp.num_iac * 3) + 1, &cp);
2774}
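
/* For reference: the byte triplets above are little-endian LAPs
 * reserved for inquiry access codes - 0x9e8b00 (LIAC, limited
 * discoverable) and 0x9e8b33 (GIAC, general discoverable) - the same
 * values as the liac[]/giac[] constants in bredr_inquiry() below.
 */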
2775
2776static int discoverable_update(struct hci_request *req, unsigned long opt)
2777{
2778 struct hci_dev *hdev = req->hdev;
2779
2780 hci_dev_lock(hdev);
2781
2782 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2783 write_iac(req);
2784 __hci_req_update_scan(req);
2785 __hci_req_update_class(req);
2786 }
2787
2788 /* Advertising instances don't use the global discoverable setting, so
2789 * only update AD if advertising was enabled using Set Advertising.
2790 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002791 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002792 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002793
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002794 /* Discoverable mode affects the local advertising
2795 * address in limited privacy mode.
2796 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302797 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2798 if (ext_adv_capable(hdev))
2799 __hci_req_start_ext_adv(req, 0x00);
2800 else
2801 __hci_req_enable_advertising(req);
2802 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002803 }
2804
Johan Hedbergaed1a882015-11-22 17:24:44 +03002805 hci_dev_unlock(hdev);
2806
2807 return 0;
2808}
2809
2810static void discoverable_update_work(struct work_struct *work)
2811{
2812 struct hci_dev *hdev = container_of(work, struct hci_dev,
2813 discoverable_update);
2814 u8 status;
2815
2816 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2817 mgmt_set_discoverable_complete(hdev, status);
2818}
2819
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002820void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2821 u8 reason)
2822{
2823 switch (conn->state) {
2824 case BT_CONNECTED:
2825 case BT_CONFIG:
2826 if (conn->type == AMP_LINK) {
2827 struct hci_cp_disconn_phy_link cp;
2828
2829 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2830 cp.reason = reason;
2831 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2832 &cp);
2833 } else {
2834 struct hci_cp_disconnect dc;
2835
2836 dc.handle = cpu_to_le16(conn->handle);
2837 dc.reason = reason;
2838 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2839 }
2840
2841 conn->state = BT_DISCONN;
2842
2843 break;
2844 case BT_CONNECT:
2845 if (conn->type == LE_LINK) {
2846 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2847 break;
2848 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2849 0, NULL);
2850 } else if (conn->type == ACL_LINK) {
2851 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2852 break;
2853 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2854 6, &conn->dst);
2855 }
2856 break;
2857 case BT_CONNECT2:
2858 if (conn->type == ACL_LINK) {
2859 struct hci_cp_reject_conn_req rej;
2860
2861 bacpy(&rej.bdaddr, &conn->dst);
2862 rej.reason = reason;
2863
2864 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2865 sizeof(rej), &rej);
2866 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2867 struct hci_cp_reject_sync_conn_req rej;
2868
2869 bacpy(&rej.bdaddr, &conn->dst);
2870
2871 /* SCO rejection has its own limited set of
2872 * allowed error values (0x0D-0x0F) which isn't
2873 * compatible with most values passed to this
2874 			 * function. To be safe, hard-code one of the
2875 * values that's suitable for SCO.
2876 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002877 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002878
2879 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2880 sizeof(rej), &rej);
2881 }
2882 break;
2883 default:
2884 conn->state = BT_CLOSED;
2885 break;
2886 }
2887}
2888
2889static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2890{
2891 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002892 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002893}
2894
2895int hci_abort_conn(struct hci_conn *conn, u8 reason)
2896{
2897 struct hci_request req;
2898 int err;
2899
2900 hci_req_init(&req, conn->hdev);
2901
2902 __hci_abort_conn(&req, conn, reason);
2903
2904 err = hci_req_run(&req, abort_conn_complete);
2905 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002906 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002907 return err;
2908 }
2909
2910 return 0;
2911}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}
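
/* The update_bg_scan()/bg_scan_update() pair above shows the general
 * pattern of this file: a handler with the
 * int (*)(struct hci_request *req, unsigned long opt) signature queues
 * HCI commands, and a work function drives it synchronously through
 * hci_req_sync(). A minimal sketch of a new user (my_update is an
 * invented name, not part of this file) would look like:
 *
 *	static int my_update(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return 0;
 *	}
 *
 * and later, from process context:
 *
 *	hci_req_sync(hdev, my_update, 0, HCI_CMD_TIMEOUT, &status);
 */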

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req, false);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
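	/* Note: giac/liac above are the little-endian LAP encodings of the
	 * General (0x9e8b33) and Limited (0x9e8b00) Inquiry Access Codes
	 * defined by the Bluetooth baseband specification.
	 */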
	struct hci_cp_inquiry cp;

	bt_dev_dbg(req->hdev, "");

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery;
	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
	 * If a remote device name is about to be resolved, do not change
	 * the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If the controller is not scanning, we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_req_add_le_scan_disable(req, false);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
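
/* Note: le_scan_restart() above only re-issues the scan *enable* command
 * (extended or legacy, depending on controller support); it relies on the
 * scan parameters previously programmed into the controller still being
 * in place, since the scan was disabled but never reconfigured.
 */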

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	bt_dev_dbg(hdev, "");

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run 'duration' after scan_start. That job was canceled for the
	 * restart, so queue it again with the remaining timeout to make
	 * sure the scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
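
/* Worked example (illustrative numbers): with HZ=1000 and a scan
 * duration of 10240 jiffies, a restart 4000 jiffies after scan_start
 * gives elapsed = 4000, so the disable work above is re-queued with a
 * timeout of 10240 - 4000 = 6240 jiffies, i.e. the scan still ends
 * 10.24 s after it originally started. The else branch of the elapsed
 * computation handles the rare case where the jiffies counter wrapped
 * around after scan_start was recorded.
 */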

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* White list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(req, false);
		cancel_interleave_scan(hdev);
	}

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, addr_resolv);
	return 0;
}
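
/* Note: the opt argument carries the scan interval through the generic
 * unsigned long parameter of the request machinery; start_discovery()
 * below passes hdev->le_scan_int_discovery (doubled for simultaneous
 * discovery) when it runs this handler via hci_req_sync().
 */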

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	bt_dev_dbg(req->hdev, "");

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
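
/* Note: DISCOV_LE_TIMEOUT is 10240 ms in hci_core.h at the time of
 * writing, so a plain LE discovery above runs an active scan for
 * roughly 10 seconds before the le_scan_disable work fires and stops it.
 */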

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
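
/* Note: the bool return value reports whether any HCI commands were
 * actually queued on the request, which presumably lets callers decide
 * whether a command completion still needs to be waited for; the
 * stop_discovery() wrapper below simply ignores it.
 */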

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	cancel_interleave_scan(hdev);
}
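
/* Note: the pairing between hci_request_setup() and
 * hci_request_cancel_all() above is deliberate; every work item
 * initialized in the setup path has a matching cancel_work_sync() or
 * cancel_delayed_work_sync() in the teardown path, so no request work
 * can still be running once the hdev is being torn down.
 */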