/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
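
/* Illustrative sketch, not part of the original file: the typical calling
 * pattern for the request API above, mirroring what __hci_cmd_sync_ev() and
 * the __hci_req_* helpers below do. The callback name req_complete_cb is a
 * hypothetical placeholder.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, req_complete_cb);
 *
 * A return value of -ENODATA simply means that no commands were queued.
 */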

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
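
/* Illustrative sketch, not part of the original file: how a caller might
 * issue a command synchronously with __hci_cmd_sync(). The vendor opcode
 * 0xfc00 is a hypothetical example value.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc00, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * The returned skb holds the command complete parameters and must be
 * released with kfree_skb() when the caller is done with it.
 */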

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
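
/* Illustrative sketch, not part of the original file: the shape of a
 * request-builder callback passed to hci_req_sync()/__hci_req_sync(). The
 * function name build_read_name_req and the status variable are hypothetical.
 *
 *	static int build_read_name_req(struct hci_request *req,
 *				       unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, build_read_name_req, 0, HCI_CMD_TIMEOUT,
 *			   &status);
 */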

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to be
		 * scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		__u8 enable = 0x00;
		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa && !use_ll_privacy(hdev) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Once controller offloading of advertisement monitors is in place,
	 * the condition below should also check for support of the MSFT
	 * extension. If suspend is ongoing, the whitelist should be the
	 * default to prevent waking by random advertisements.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;
		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the controller supports the LE Set
	 * Extended Scan Parameters and LE Set Extended Scan Enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
1436 if (complete_len) {
1437 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1438
1439 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1440 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1441
1442 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1443 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001444 }
1445
1446 return ad_len;
1447}
1448
Michał Narajowski1b422062016-10-05 12:28:27 +02001449static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1450{
1451 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1452}
1453
Michał Narajowski7c295c42016-09-18 12:50:02 +02001454static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1455{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001456 u8 scan_rsp_len = 0;
1457
1458 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001459 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001460 }
1461
Michał Narajowski1b422062016-10-05 12:28:27 +02001462 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001463}
1464
Johan Hedbergf2252572015-11-18 12:49:20 +02001465static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1466 u8 *ptr)
1467{
1468 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001469 u32 instance_flags;
1470 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001471
1472 adv_instance = hci_find_adv_instance(hdev, instance);
1473 if (!adv_instance)
1474 return 0;
1475
Michał Narajowski7c295c42016-09-18 12:50:02 +02001476 instance_flags = adv_instance->flags;
1477
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001478 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001479 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001480 }
1481
Michał Narajowski1b422062016-10-05 12:28:27 +02001482 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001483 adv_instance->scan_rsp_len);
1484
Michał Narajowski7c295c42016-09-18 12:50:02 +02001485 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001486
1487 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1488 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1489
1490 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001491}
1492
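/* Queue the HCI command that refreshes the controller's scan response data
 * for @instance (0x00 selects the default data). A rough reading of the flow
 * below: build the payload, skip the command if it matches the cached
 * hdev->scan_rsp_data, otherwise cache it and queue either the extended
 * (HCI_OP_LE_SET_EXT_SCAN_RSP_DATA) or legacy (HCI_OP_LE_SET_SCAN_RSP_DATA)
 * variant depending on ext_adv_capable().
 */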
Johan Hedbergcab054a2015-11-30 11:21:45 +02001493void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001494{
1495 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001496 u8 len;
1497
1498 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1499 return;
1500
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301501 if (ext_adv_capable(hdev)) {
1502 struct hci_cp_le_set_ext_scan_rsp_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001503
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301504 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001505
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301506 if (instance)
1507 len = create_instance_scan_rsp_data(hdev, instance,
1508 cp.data);
1509 else
1510 len = create_default_scan_rsp_data(hdev, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001511
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301512 if (hdev->scan_rsp_data_len == len &&
1513 !memcmp(cp.data, hdev->scan_rsp_data, len))
1514 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001515
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301516 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1517 hdev->scan_rsp_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001518
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001519 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301520 cp.length = len;
1521 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1522 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1523
1524 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1525 &cp);
1526 } else {
1527 struct hci_cp_le_set_scan_rsp_data cp;
1528
1529 memset(&cp, 0, sizeof(cp));
1530
1531 if (instance)
1532 len = create_instance_scan_rsp_data(hdev, instance,
1533 cp.data);
1534 else
1535 len = create_default_scan_rsp_data(hdev, cp.data);
1536
1537 if (hdev->scan_rsp_data_len == len &&
1538 !memcmp(cp.data, hdev->scan_rsp_data, len))
1539 return;
1540
1541 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1542 hdev->scan_rsp_data_len = len;
1543
1544 cp.length = len;
1545
1546 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1547 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001548}
1549
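/* Rough outline of the helper below (paraphrased from its code, not a spec):
 * assembles the advertising data for @instance as
 *
 *	[Flags AD (unless the instance data already contains EIR_FLAGS)]
 *	[instance adv_data supplied by userspace]
 *	[Tx Power AD (if MGMT_ADV_FLAG_TX_POWER and the value is valid)]
 *
 * and returns the total number of bytes written to @ptr.
 */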
Johan Hedbergf2252572015-11-18 12:49:20 +02001550static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1551{
1552 struct adv_info *adv_instance = NULL;
1553 u8 ad_len = 0, flags = 0;
1554 u32 instance_flags;
1555
1556 /* Return 0 when the current instance identifier is invalid. */
1557 if (instance) {
1558 adv_instance = hci_find_adv_instance(hdev, instance);
1559 if (!adv_instance)
1560 return 0;
1561 }
1562
1563 instance_flags = get_adv_instance_flags(hdev, instance);
1564
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001565	/* If the instance already has the flags set, skip adding them
 1566	 * again.
1567 */
1568 if (adv_instance && eir_get_data(adv_instance->adv_data,
1569 adv_instance->adv_data_len, EIR_FLAGS,
1570 NULL))
1571 goto skip_flags;
1572
Johan Hedbergf2252572015-11-18 12:49:20 +02001573 /* The Add Advertising command allows userspace to set both the general
1574 * and limited discoverable flags.
1575 */
1576 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1577 flags |= LE_AD_GENERAL;
1578
1579 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1580 flags |= LE_AD_LIMITED;
1581
Johan Hedbergf18ba582016-04-06 13:09:05 +03001582 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1583 flags |= LE_AD_NO_BREDR;
1584
Johan Hedbergf2252572015-11-18 12:49:20 +02001585 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1586 /* If a discovery flag wasn't provided, simply use the global
1587 * settings.
1588 */
1589 if (!flags)
1590 flags |= mgmt_get_adv_discov_flags(hdev);
1591
Johan Hedbergf2252572015-11-18 12:49:20 +02001592 /* If flags would still be empty, then there is no need to
 1593	 * include the "Flags" AD field.
1594 */
1595 if (flags) {
1596 ptr[0] = 0x02;
1597 ptr[1] = EIR_FLAGS;
1598 ptr[2] = flags;
1599
1600 ad_len += 3;
1601 ptr += 3;
1602 }
1603 }
1604
Luiz Augusto von Dentz6012b932019-11-03 23:58:15 +02001605skip_flags:
Johan Hedbergf2252572015-11-18 12:49:20 +02001606 if (adv_instance) {
1607 memcpy(ptr, adv_instance->adv_data,
1608 adv_instance->adv_data_len);
1609 ad_len += adv_instance->adv_data_len;
1610 ptr += adv_instance->adv_data_len;
1611 }
1612
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301613 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1614 s8 adv_tx_power;
Johan Hedbergf2252572015-11-18 12:49:20 +02001615
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301616 if (ext_adv_capable(hdev)) {
1617 if (adv_instance)
1618 adv_tx_power = adv_instance->tx_power;
1619 else
1620 adv_tx_power = hdev->adv_tx_power;
1621 } else {
1622 adv_tx_power = hdev->adv_tx_power;
1623 }
1624
1625 /* Provide Tx Power only if we can provide a valid value for it */
1626 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1627 ptr[0] = 0x02;
1628 ptr[1] = EIR_TX_POWER;
1629 ptr[2] = (u8)adv_tx_power;
1630
1631 ad_len += 3;
1632 ptr += 3;
1633 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001634 }
1635
1636 return ad_len;
1637}
1638
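/* Queue the command that pushes fresh advertising data for @instance to the
 * controller. As with the scan response path above, this is only a sketch of
 * the logic that follows: rebuild the payload, return early if it equals the
 * cached hdev->adv_data, otherwise cache it and queue the extended or legacy
 * LE Set Advertising Data command depending on ext_adv_capable().
 */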
Johan Hedbergcab054a2015-11-30 11:21:45 +02001639void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001640{
1641 struct hci_dev *hdev = req->hdev;
Johan Hedbergf2252572015-11-18 12:49:20 +02001642 u8 len;
1643
1644 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1645 return;
1646
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301647 if (ext_adv_capable(hdev)) {
1648 struct hci_cp_le_set_ext_adv_data cp;
Johan Hedbergf2252572015-11-18 12:49:20 +02001649
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301650 memset(&cp, 0, sizeof(cp));
Johan Hedbergf2252572015-11-18 12:49:20 +02001651
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301652 len = create_instance_adv_data(hdev, instance, cp.data);
Johan Hedbergf2252572015-11-18 12:49:20 +02001653
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301654 /* There's nothing to do if the data hasn't changed */
1655 if (hdev->adv_data_len == len &&
1656 memcmp(cp.data, hdev->adv_data, len) == 0)
1657 return;
Johan Hedbergf2252572015-11-18 12:49:20 +02001658
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301659 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1660 hdev->adv_data_len = len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001661
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301662 cp.length = len;
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001663 cp.handle = instance;
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301664 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1665 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1666
1667 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1668 } else {
1669 struct hci_cp_le_set_adv_data cp;
1670
1671 memset(&cp, 0, sizeof(cp));
1672
1673 len = create_instance_adv_data(hdev, instance, cp.data);
1674
1675 /* There's nothing to do if the data hasn't changed */
1676 if (hdev->adv_data_len == len &&
1677 memcmp(cp.data, hdev->adv_data, len) == 0)
1678 return;
1679
1680 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1681 hdev->adv_data_len = len;
1682
1683 cp.length = len;
1684
1685 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1686 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001687}
1688
Johan Hedbergcab054a2015-11-30 11:21:45 +02001689int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001690{
1691 struct hci_request req;
1692
1693 hci_req_init(&req, hdev);
1694 __hci_req_update_adv_data(&req, instance);
1695
1696 return hci_req_run(&req, NULL);
1697}
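/* Illustrative caller sketch (hypothetical, for documentation only): a mgmt
 * handler that changed device information could refresh instance 0 with
 *
 *	err = hci_req_update_adv_data(hdev, 0x00);
 *	if (err && err != -ENODATA)
 *		bt_dev_err(hdev, "failed to update adv data: %d", err);
 *
 * where -ENODATA simply means the request ended up empty (nothing changed)
 * and no command was sent.
 */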
1698
1699static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1700{
1701 BT_DBG("%s status %u", hdev->name, status);
1702}
1703
1704void hci_req_reenable_advertising(struct hci_dev *hdev)
1705{
1706 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001707
1708 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001709 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001710 return;
1711
Johan Hedbergf2252572015-11-18 12:49:20 +02001712 hci_req_init(&req, hdev);
1713
Johan Hedbergcab054a2015-11-30 11:21:45 +02001714 if (hdev->cur_adv_instance) {
1715 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1716 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001717 } else {
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301718 if (ext_adv_capable(hdev)) {
1719 __hci_req_start_ext_adv(&req, 0x00);
1720 } else {
1721 __hci_req_update_adv_data(&req, 0x00);
1722 __hci_req_update_scan_rsp_data(&req, 0x00);
1723 __hci_req_enable_advertising(&req);
1724 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001725 }
1726
1727 hci_req_run(&req, adv_enable_complete);
1728}
1729
1730static void adv_timeout_expire(struct work_struct *work)
1731{
1732 struct hci_dev *hdev = container_of(work, struct hci_dev,
1733 adv_instance_expire.work);
1734
1735 struct hci_request req;
1736 u8 instance;
1737
1738 BT_DBG("%s", hdev->name);
1739
1740 hci_dev_lock(hdev);
1741
1742 hdev->adv_instance_timeout = 0;
1743
Johan Hedbergcab054a2015-11-30 11:21:45 +02001744 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001745 if (instance == 0x00)
1746 goto unlock;
1747
1748 hci_req_init(&req, hdev);
1749
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001750 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001751
1752 if (list_empty(&hdev->adv_instances))
1753 __hci_req_disable_advertising(&req);
1754
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001755 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001756
1757unlock:
1758 hci_dev_unlock(hdev);
1759}
1760
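/* Pick the own-address type and, when needed, a random address for an
 * advertising instance. Condensed view of the cases handled below (a
 * paraphrase of the code, not normative):
 *
 *	use_rpa          -> ADDR_LE_DEV_RANDOM plus a (re)generated RPA
 *	require_privacy  -> ADDR_LE_DEV_RANDOM plus a fresh non-resolvable address
 *	otherwise        -> ADDR_LE_DEV_PUBLIC, *rand_addr left as BDADDR_ANY
 *
 * Returns 0 on success or a negative error from RPA generation.
 */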
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301761int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1762 bool use_rpa, struct adv_info *adv_instance,
1763 u8 *own_addr_type, bdaddr_t *rand_addr)
1764{
1765 int err;
1766
1767 bacpy(rand_addr, BDADDR_ANY);
1768
1769 /* If privacy is enabled use a resolvable private address. If
1770 * current RPA has expired then generate a new one.
1771 */
1772 if (use_rpa) {
1773 int to;
1774
1775 *own_addr_type = ADDR_LE_DEV_RANDOM;
1776
1777 if (adv_instance) {
1778 if (!adv_instance->rpa_expired &&
1779 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1780 return 0;
1781
1782 adv_instance->rpa_expired = false;
1783 } else {
1784 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1785 !bacmp(&hdev->random_addr, &hdev->rpa))
1786 return 0;
1787 }
1788
1789 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1790 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001791 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301792 return err;
1793 }
1794
1795 bacpy(rand_addr, &hdev->rpa);
1796
1797 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1798 if (adv_instance)
1799 queue_delayed_work(hdev->workqueue,
1800 &adv_instance->rpa_expired_cb, to);
1801 else
1802 queue_delayed_work(hdev->workqueue,
1803 &hdev->rpa_expired, to);
1804
1805 return 0;
1806 }
1807
1808 /* In case of required privacy without resolvable private address,
 1809	 * use a non-resolvable private address. This is useful for
1810 * non-connectable advertising.
1811 */
1812 if (require_privacy) {
1813 bdaddr_t nrpa;
1814
1815 while (true) {
1816 /* The non-resolvable private address is generated
 1817	 * from six random bytes with the two most significant
1818 * bits cleared.
1819 */
1820 get_random_bytes(&nrpa, 6);
1821 nrpa.b[5] &= 0x3f;
1822
1823 /* The non-resolvable private address shall not be
1824 * equal to the public address.
1825 */
1826 if (bacmp(&hdev->bdaddr, &nrpa))
1827 break;
1828 }
1829
1830 *own_addr_type = ADDR_LE_DEV_RANDOM;
1831 bacpy(rand_addr, &nrpa);
1832
1833 return 0;
1834 }
1835
1836 /* No privacy so use a public address. */
1837 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1838
1839 return 0;
1840}
1841
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301842void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1843{
1844 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1845}
1846
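/* Queue HCI_OP_LE_SET_EXT_ADV_PARAMS (and, when required, the per-set random
 * address) for @instance. The event-properties selection below can be read as
 * the following informal table (summarized from the code, not from the spec):
 *
 *	connectable, secondary PHY requested  -> LE_EXT_ADV_CONN_IND
 *	connectable, legacy only              -> LE_LEGACY_ADV_IND
 *	scan rsp data, secondary PHY          -> LE_EXT_ADV_SCAN_IND
 *	scan rsp data, legacy only            -> LE_LEGACY_ADV_SCAN_IND
 *	otherwise, secondary PHY              -> LE_EXT_ADV_NON_CONN_IND
 *	otherwise, legacy only                -> LE_LEGACY_NONCONN_IND
 */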
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301847int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301848{
1849 struct hci_cp_le_set_ext_adv_params cp;
1850 struct hci_dev *hdev = req->hdev;
1851 bool connectable;
1852 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301853 bdaddr_t random_addr;
1854 u8 own_addr_type;
1855 int err;
1856 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301857 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301858
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301859 if (instance > 0) {
1860 adv_instance = hci_find_adv_instance(hdev, instance);
1861 if (!adv_instance)
1862 return -EINVAL;
1863 } else {
1864 adv_instance = NULL;
1865 }
1866
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301867 flags = get_adv_instance_flags(hdev, instance);
1868
1869 /* If the "connectable" instance flag was not set, then choose between
1870 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1871 */
1872 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1873 mgmt_get_connectable(hdev);
1874
Colin Ian King75edd1f2018-11-09 13:27:36 +00001875 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301876 return -EPERM;
1877
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301878 /* Set require_privacy to true only when non-connectable
1879 * advertising is used. In that case it is fine to use a
1880 * non-resolvable private address.
1881 */
1882 err = hci_get_random_address(hdev, !connectable,
1883 adv_use_rpa(hdev, flags), adv_instance,
1884 &own_addr_type, &random_addr);
1885 if (err < 0)
1886 return err;
1887
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301888 memset(&cp, 0, sizeof(cp));
1889
Alain Michaud5cbd3eb2020-06-22 13:30:28 +00001890	/* In the ext adv set param command the interval is 3 octets */
1891 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1892 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301893
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301894 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1895
1896 if (connectable) {
1897 if (secondary_adv)
1898 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1899 else
1900 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1901 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1902 if (secondary_adv)
1903 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1904 else
1905 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1906 } else {
1907 if (secondary_adv)
1908 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1909 else
1910 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1911 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301912
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301913 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301914 cp.channel_map = hdev->le_adv_channel_map;
1915 cp.tx_power = 127;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001916 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301917
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301918 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1919 cp.primary_phy = HCI_ADV_PHY_1M;
1920 cp.secondary_phy = HCI_ADV_PHY_2M;
1921 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1922 cp.primary_phy = HCI_ADV_PHY_CODED;
1923 cp.secondary_phy = HCI_ADV_PHY_CODED;
1924 } else {
1925 /* In all other cases use 1M */
1926 cp.primary_phy = HCI_ADV_PHY_1M;
1927 cp.secondary_phy = HCI_ADV_PHY_1M;
1928 }
1929
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301930 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1931
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301932 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1933 bacmp(&random_addr, BDADDR_ANY)) {
1934 struct hci_cp_le_set_adv_set_rand_addr cp;
1935
 1936	/* Check if the random address needs to be updated */
1937 if (adv_instance) {
1938 if (!bacmp(&random_addr, &adv_instance->random_addr))
1939 return 0;
1940 } else {
1941 if (!bacmp(&random_addr, &hdev->random_addr))
1942 return 0;
1943 }
1944
1945 memset(&cp, 0, sizeof(cp));
1946
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001947 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301948 bacpy(&cp.bdaddr, &random_addr);
1949
1950 hci_req_add(req,
1951 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1952 sizeof(cp), &cp);
1953 }
1954
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301955 return 0;
1956}
1957
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001958int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301959{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001960 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301961 struct hci_cp_le_set_ext_adv_enable *cp;
1962 struct hci_cp_ext_adv_set *adv_set;
1963 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001964 struct adv_info *adv_instance;
1965
1966 if (instance > 0) {
1967 adv_instance = hci_find_adv_instance(hdev, instance);
1968 if (!adv_instance)
1969 return -EINVAL;
1970 } else {
1971 adv_instance = NULL;
1972 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301973
1974 cp = (void *) data;
1975 adv_set = (void *) cp->data;
1976
1977 memset(cp, 0, sizeof(*cp));
1978
1979 cp->enable = 0x01;
1980 cp->num_of_sets = 0x01;
1981
1982 memset(adv_set, 0, sizeof(*adv_set));
1983
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001984 adv_set->handle = instance;
1985
1986 /* Set duration per instance since controller is responsible for
1987 * scheduling it.
1988 */
1989 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001990 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001991
1992 /* Time = N * 10 ms */
1993 adv_set->duration = cpu_to_le16(duration / 10);
1994 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301995
1996 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1997 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1998 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001999
2000 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302001}
2002
Daniel Winkler37adf702020-07-14 14:16:00 -07002003int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2004{
2005 struct hci_dev *hdev = req->hdev;
2006 struct hci_cp_le_set_ext_adv_enable *cp;
2007 struct hci_cp_ext_adv_set *adv_set;
2008 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2009 u8 req_size;
2010
2011 /* If request specifies an instance that doesn't exist, fail */
2012 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2013 return -EINVAL;
2014
2015 memset(data, 0, sizeof(data));
2016
2017 cp = (void *)data;
2018 adv_set = (void *)cp->data;
2019
2020 /* Instance 0x00 indicates all advertising instances will be disabled */
2021 cp->num_of_sets = !!instance;
2022 cp->enable = 0x00;
2023
2024 adv_set->handle = instance;
2025
2026 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2027 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2028
2029 return 0;
2030}
2031
2032int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2033{
2034 struct hci_dev *hdev = req->hdev;
2035
2036 /* If request specifies an instance that doesn't exist, fail */
2037 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2038 return -EINVAL;
2039
2040 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2041
2042 return 0;
2043}
2044
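/* Convenience helper that (re)starts extended advertising for @instance.
 * Informal outline of the sequence queued below: disable the set first if the
 * controller already knows about it (i.e. the instance is not pending),
 * program the advertising parameters, refresh the scan response data and
 * finally enable the set.
 */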
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302045int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2046{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302047 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07002048 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302049 int err;
2050
Daniel Winkler37adf702020-07-14 14:16:00 -07002051	/* If the instance isn't pending, the controller already knows about it,
 2052	 * and it's safe to disable.
2053 */
2054 if (adv_instance && !adv_instance->pending)
2055 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05302056
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302057 err = __hci_req_setup_ext_adv_instance(req, instance);
2058 if (err < 0)
2059 return err;
2060
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302061 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002062 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302063
2064 return 0;
2065}
2066
Johan Hedbergf2252572015-11-18 12:49:20 +02002067int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2068 bool force)
2069{
2070 struct hci_dev *hdev = req->hdev;
2071 struct adv_info *adv_instance = NULL;
2072 u16 timeout;
2073
2074 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02002075 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02002076 return -EPERM;
2077
2078 if (hdev->adv_instance_timeout)
2079 return -EBUSY;
2080
2081 adv_instance = hci_find_adv_instance(hdev, instance);
2082 if (!adv_instance)
2083 return -ENOENT;
2084
2085 /* A zero timeout means unlimited advertising. As long as there is
2086 * only one instance, duration should be ignored. We still set a timeout
2087 * in case further instances are being added later on.
2088 *
2089 * If the remaining lifetime of the instance is more than the duration
2090 * then the timeout corresponds to the duration, otherwise it will be
2091 * reduced to the remaining instance lifetime.
2092 */
2093 if (adv_instance->timeout == 0 ||
2094 adv_instance->duration <= adv_instance->remaining_time)
2095 timeout = adv_instance->duration;
2096 else
2097 timeout = adv_instance->remaining_time;
2098
2099 /* The remaining time is being reduced unless the instance is being
2100 * advertised without time limit.
2101 */
2102 if (adv_instance->timeout)
2103 adv_instance->remaining_time =
2104 adv_instance->remaining_time - timeout;
2105
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002106 /* Only use work for scheduling instances with legacy advertising */
2107 if (!ext_adv_capable(hdev)) {
2108 hdev->adv_instance_timeout = timeout;
2109 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02002110 &hdev->adv_instance_expire,
2111 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002112 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002113
2114 /* If we're just re-scheduling the same instance again then do not
2115 * execute any HCI commands. This happens when a single instance is
2116 * being advertised.
2117 */
2118 if (!force && hdev->cur_adv_instance == instance &&
2119 hci_dev_test_flag(hdev, HCI_LE_ADV))
2120 return 0;
2121
2122 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302123 if (ext_adv_capable(hdev)) {
2124 __hci_req_start_ext_adv(req, instance);
2125 } else {
2126 __hci_req_update_adv_data(req, instance);
2127 __hci_req_update_scan_rsp_data(req, instance);
2128 __hci_req_enable_advertising(req);
2129 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002130
2131 return 0;
2132}
2133
2134static void cancel_adv_timeout(struct hci_dev *hdev)
2135{
2136 if (hdev->adv_instance_timeout) {
2137 hdev->adv_instance_timeout = 0;
2138 cancel_delayed_work(&hdev->adv_instance_expire);
2139 }
2140}
2141
2142/* For a single instance:
2143 * - force == true: The instance will be removed even when its remaining
2144 * lifetime is not zero.
2145 * - force == false: the instance will be deactivated but kept stored unless
2146 * the remaining lifetime is zero.
2147 *
2148 * For instance == 0x00:
2149 * - force == true: All instances will be removed regardless of their timeout
2150 * setting.
2151 * - force == false: Only instances that have a timeout will be removed.
2152 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002153void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2154 struct hci_request *req, u8 instance,
2155 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02002156{
2157 struct adv_info *adv_instance, *n, *next_instance = NULL;
2158 int err;
2159 u8 rem_inst;
2160
2161 /* Cancel any timeout concerning the removed instance(s). */
2162 if (!instance || hdev->cur_adv_instance == instance)
2163 cancel_adv_timeout(hdev);
2164
2165 /* Get the next instance to advertise BEFORE we remove
2166 * the current one. This can be the same instance again
2167 * if there is only one instance.
2168 */
2169 if (instance && hdev->cur_adv_instance == instance)
2170 next_instance = hci_get_next_instance(hdev, instance);
2171
2172 if (instance == 0x00) {
2173 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2174 list) {
2175 if (!(force || adv_instance->timeout))
2176 continue;
2177
2178 rem_inst = adv_instance->instance;
2179 err = hci_remove_adv_instance(hdev, rem_inst);
2180 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002181 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02002182 }
Johan Hedbergf2252572015-11-18 12:49:20 +02002183 } else {
2184 adv_instance = hci_find_adv_instance(hdev, instance);
2185
2186 if (force || (adv_instance && adv_instance->timeout &&
2187 !adv_instance->remaining_time)) {
2188 /* Don't advertise a removed instance. */
2189 if (next_instance &&
2190 next_instance->instance == instance)
2191 next_instance = NULL;
2192
2193 err = hci_remove_adv_instance(hdev, instance);
2194 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03002195 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02002196 }
2197 }
2198
Johan Hedbergf2252572015-11-18 12:49:20 +02002199 if (!req || !hdev_is_powered(hdev) ||
2200 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2201 return;
2202
Daniel Winkler37adf702020-07-14 14:16:00 -07002203 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02002204 __hci_req_schedule_adv_instance(req, next_instance->instance,
2205 false);
2206}
2207
Johan Hedberg0857dd32014-12-19 13:40:20 +02002208static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2209{
2210 struct hci_dev *hdev = req->hdev;
2211
2212 /* If we're advertising or initiating an LE connection we can't
2213 * go ahead and change the random address at this time. This is
2214 * because the eventual initiator address used for the
2215 * subsequently created connection will be undefined (some
2216 * controllers use the new address and others the one we had
2217 * when the operation started).
2218 *
2219 * In this kind of scenario skip the update and let the random
2220 * address be updated at the next cycle.
2221 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002222 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02002223 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002224 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002225 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02002226 return;
2227 }
2228
2229 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2230}
2231
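/* Select the own-address type for scanning/initiating and queue an address
 * update when needed. Informal priority order implemented below:
 *
 *	1. use_rpa          -> random address, RPA (regenerated if expired)
 *	2. require_privacy  -> random address, fresh non-resolvable address
 *	3. forced static address, no public address, or BR/EDR disabled with a
 *	   static address configured -> random address, hdev->static_addr
 *	4. otherwise        -> public address
 */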
2232int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002233 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02002234{
2235 struct hci_dev *hdev = req->hdev;
2236 int err;
2237
2238 /* If privacy is enabled use a resolvable private address. If
2239 * current RPA has expired or there is something else than
2240 * the current RPA in use, then generate a new one.
2241 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002242 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002243 int to;
2244
2245 *own_addr_type = ADDR_LE_DEV_RANDOM;
2246
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002247 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02002248 !bacmp(&hdev->random_addr, &hdev->rpa))
2249 return 0;
2250
2251 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2252 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002253 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02002254 return err;
2255 }
2256
2257 set_random_addr(req, &hdev->rpa);
2258
2259 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2260 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2261
2262 return 0;
2263 }
2264
2265 /* In case of required privacy without resolvable private address,
 2266	 * use a non-resolvable private address. This is useful for active
2267 * scanning and non-connectable advertising.
2268 */
2269 if (require_privacy) {
2270 bdaddr_t nrpa;
2271
2272 while (true) {
2273 /* The non-resolvable private address is generated
 2274	 * from six random bytes with the two most significant
2275 * bits cleared.
2276 */
2277 get_random_bytes(&nrpa, 6);
2278 nrpa.b[5] &= 0x3f;
2279
2280 /* The non-resolvable private address shall not be
2281 * equal to the public address.
2282 */
2283 if (bacmp(&hdev->bdaddr, &nrpa))
2284 break;
2285 }
2286
2287 *own_addr_type = ADDR_LE_DEV_RANDOM;
2288 set_random_addr(req, &nrpa);
2289 return 0;
2290 }
2291
2292 /* If forcing static address is in use or there is no public
 2293	 * address, use the static address as random address (but skip
 2294	 * the HCI command if the current random address is already the
 2295	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002296 *
2297 * In case BR/EDR has been disabled on a dual-mode controller
2298 * and a static address has been configured, then use that
2299 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02002300 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002301 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002302 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002303 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002304 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02002305 *own_addr_type = ADDR_LE_DEV_RANDOM;
2306 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2307 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2308 &hdev->static_addr);
2309 return 0;
2310 }
2311
2312 /* Neither privacy nor static address is being used so use a
2313 * public address.
2314 */
2315 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2316
2317 return 0;
2318}
Johan Hedberg2cf22212014-12-19 22:26:00 +02002319
Johan Hedberg405a2612014-12-19 23:18:22 +02002320static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2321{
2322 struct bdaddr_list *b;
2323
2324 list_for_each_entry(b, &hdev->whitelist, list) {
2325 struct hci_conn *conn;
2326
2327 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2328 if (!conn)
2329 return true;
2330
2331 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2332 return true;
2333 }
2334
2335 return false;
2336}
2337
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002338void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02002339{
2340 struct hci_dev *hdev = req->hdev;
2341 u8 scan;
2342
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002343 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02002344 return;
2345
2346 if (!hdev_is_powered(hdev))
2347 return;
2348
2349 if (mgmt_powering_down(hdev))
2350 return;
2351
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07002352 if (hdev->scanning_paused)
2353 return;
2354
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002355 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02002356 disconnected_whitelist_entries(hdev))
2357 scan = SCAN_PAGE;
2358 else
2359 scan = SCAN_DISABLED;
2360
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002361 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02002362 scan |= SCAN_INQUIRY;
2363
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002364 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2365 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2366 return;
2367
Johan Hedberg405a2612014-12-19 23:18:22 +02002368 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2369}
2370
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002371static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02002372{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002373 hci_dev_lock(req->hdev);
2374 __hci_req_update_scan(req);
2375 hci_dev_unlock(req->hdev);
2376 return 0;
2377}
Johan Hedberg405a2612014-12-19 23:18:22 +02002378
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002379static void scan_update_work(struct work_struct *work)
2380{
2381 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2382
2383 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02002384}
2385
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002386static int connectable_update(struct hci_request *req, unsigned long opt)
2387{
2388 struct hci_dev *hdev = req->hdev;
2389
2390 hci_dev_lock(hdev);
2391
2392 __hci_req_update_scan(req);
2393
2394 /* If BR/EDR is not enabled and we disable advertising as a
2395 * by-product of disabling connectable, we need to update the
2396 * advertising flags.
2397 */
2398 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02002399 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002400
2401 /* Update the advertising parameters if necessary */
2402 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302403 !list_empty(&hdev->adv_instances)) {
2404 if (ext_adv_capable(hdev))
2405 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2406 else
2407 __hci_req_enable_advertising(req);
2408 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002409
2410 __hci_update_background_scan(req);
2411
2412 hci_dev_unlock(hdev);
2413
2414 return 0;
2415}
2416
2417static void connectable_update_work(struct work_struct *work)
2418{
2419 struct hci_dev *hdev = container_of(work, struct hci_dev,
2420 connectable_update);
2421 u8 status;
2422
2423 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2424 mgmt_set_connectable_complete(hdev, status);
2425}
2426
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02002427static u8 get_service_classes(struct hci_dev *hdev)
2428{
2429 struct bt_uuid *uuid;
2430 u8 val = 0;
2431
2432 list_for_each_entry(uuid, &hdev->uuids, list)
2433 val |= uuid->svc_hint;
2434
2435 return val;
2436}
2437
2438void __hci_req_update_class(struct hci_request *req)
2439{
2440 struct hci_dev *hdev = req->hdev;
2441 u8 cod[3];
2442
2443 BT_DBG("%s", hdev->name);
2444
2445 if (!hdev_is_powered(hdev))
2446 return;
2447
2448 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2449 return;
2450
2451 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2452 return;
2453
2454 cod[0] = hdev->minor_class;
2455 cod[1] = hdev->major_class;
2456 cod[2] = get_service_classes(hdev);
2457
2458 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2459 cod[1] |= 0x20;
2460
2461 if (memcmp(cod, hdev->dev_class, 3) == 0)
2462 return;
2463
2464 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2465}
2466
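/* Note on the magic bytes below: the IAC LAPs are written LSB first, so
 * { 0x00, 0x8b, 0x9e } is the Limited Inquiry Access Code 0x9e8b00 and
 * { 0x33, 0x8b, 0x9e } is the General Inquiry Access Code 0x9e8b33.
 */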
Johan Hedbergaed1a882015-11-22 17:24:44 +03002467static void write_iac(struct hci_request *req)
2468{
2469 struct hci_dev *hdev = req->hdev;
2470 struct hci_cp_write_current_iac_lap cp;
2471
2472 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2473 return;
2474
2475 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2476 /* Limited discoverable mode */
2477 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2478 cp.iac_lap[0] = 0x00; /* LIAC */
2479 cp.iac_lap[1] = 0x8b;
2480 cp.iac_lap[2] = 0x9e;
2481 cp.iac_lap[3] = 0x33; /* GIAC */
2482 cp.iac_lap[4] = 0x8b;
2483 cp.iac_lap[5] = 0x9e;
2484 } else {
2485 /* General discoverable mode */
2486 cp.num_iac = 1;
2487 cp.iac_lap[0] = 0x33; /* GIAC */
2488 cp.iac_lap[1] = 0x8b;
2489 cp.iac_lap[2] = 0x9e;
2490 }
2491
2492 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2493 (cp.num_iac * 3) + 1, &cp);
2494}
2495
2496static int discoverable_update(struct hci_request *req, unsigned long opt)
2497{
2498 struct hci_dev *hdev = req->hdev;
2499
2500 hci_dev_lock(hdev);
2501
2502 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2503 write_iac(req);
2504 __hci_req_update_scan(req);
2505 __hci_req_update_class(req);
2506 }
2507
2508 /* Advertising instances don't use the global discoverable setting, so
2509 * only update AD if advertising was enabled using Set Advertising.
2510 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002511 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02002512 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002513
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002514 /* Discoverable mode affects the local advertising
2515 * address in limited privacy mode.
2516 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302517 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2518 if (ext_adv_capable(hdev))
2519 __hci_req_start_ext_adv(req, 0x00);
2520 else
2521 __hci_req_enable_advertising(req);
2522 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002523 }
2524
Johan Hedbergaed1a882015-11-22 17:24:44 +03002525 hci_dev_unlock(hdev);
2526
2527 return 0;
2528}
2529
2530static void discoverable_update_work(struct work_struct *work)
2531{
2532 struct hci_dev *hdev = container_of(work, struct hci_dev,
2533 discoverable_update);
2534 u8 status;
2535
2536 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2537 mgmt_set_discoverable_complete(hdev, status);
2538}
2539
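/* Queue the command(s) needed to tear down @conn, chosen by its current
 * state. Informal summary of the switch below: established links get a
 * Disconnect (or Disconnect Physical Link for AMP), outgoing connection
 * attempts get the matching Create Connection Cancel, incoming requests are
 * rejected, and anything else is simply marked BT_CLOSED.
 */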
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002540void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2541 u8 reason)
2542{
2543 switch (conn->state) {
2544 case BT_CONNECTED:
2545 case BT_CONFIG:
2546 if (conn->type == AMP_LINK) {
2547 struct hci_cp_disconn_phy_link cp;
2548
2549 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2550 cp.reason = reason;
2551 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2552 &cp);
2553 } else {
2554 struct hci_cp_disconnect dc;
2555
2556 dc.handle = cpu_to_le16(conn->handle);
2557 dc.reason = reason;
2558 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2559 }
2560
2561 conn->state = BT_DISCONN;
2562
2563 break;
2564 case BT_CONNECT:
2565 if (conn->type == LE_LINK) {
2566 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2567 break;
2568 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2569 0, NULL);
2570 } else if (conn->type == ACL_LINK) {
2571 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2572 break;
2573 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2574 6, &conn->dst);
2575 }
2576 break;
2577 case BT_CONNECT2:
2578 if (conn->type == ACL_LINK) {
2579 struct hci_cp_reject_conn_req rej;
2580
2581 bacpy(&rej.bdaddr, &conn->dst);
2582 rej.reason = reason;
2583
2584 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2585 sizeof(rej), &rej);
2586 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2587 struct hci_cp_reject_sync_conn_req rej;
2588
2589 bacpy(&rej.bdaddr, &conn->dst);
2590
2591 /* SCO rejection has its own limited set of
2592 * allowed error values (0x0D-0x0F) which isn't
2593 * compatible with most values passed to this
2594 * function. To be safe hard-code one of the
2595 * values that's suitable for SCO.
2596 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002597 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002598
2599 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2600 sizeof(rej), &rej);
2601 }
2602 break;
2603 default:
2604 conn->state = BT_CLOSED;
2605 break;
2606 }
2607}
2608
2609static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2610{
2611 if (status)
2612 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2613}
2614
2615int hci_abort_conn(struct hci_conn *conn, u8 reason)
2616{
2617 struct hci_request req;
2618 int err;
2619
2620 hci_req_init(&req, conn->hdev);
2621
2622 __hci_abort_conn(&req, conn, reason);
2623
2624 err = hci_req_run(&req, abort_conn_complete);
2625 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002626 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002627 return err;
2628 }
2629
2630 return 0;
2631}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002632
Johan Hedberga1d01db2015-11-11 08:11:25 +02002633static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002634{
2635 hci_dev_lock(req->hdev);
2636 __hci_update_background_scan(req);
2637 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002638 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002639}
2640
2641static void bg_scan_update(struct work_struct *work)
2642{
2643 struct hci_dev *hdev = container_of(work, struct hci_dev,
2644 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002645 struct hci_conn *conn;
2646 u8 status;
2647 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002648
Johan Hedberg84235d22015-11-11 08:11:20 +02002649 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2650 if (!err)
2651 return;
2652
2653 hci_dev_lock(hdev);
2654
2655 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2656 if (conn)
2657 hci_le_conn_failed(conn, status);
2658
2659 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002660}
2661
Johan Hedberga1d01db2015-11-11 08:11:25 +02002662static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002663{
2664 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002665 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002666}
2667
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002668static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2669{
2670 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002671 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2672 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002673 struct hci_cp_inquiry cp;
2674
2675 BT_DBG("%s", req->hdev->name);
2676
2677 hci_dev_lock(req->hdev);
2678 hci_inquiry_cache_flush(req->hdev);
2679 hci_dev_unlock(req->hdev);
2680
2681 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002682
2683 if (req->hdev->discovery.limited)
2684 memcpy(&cp.lap, liac, sizeof(cp.lap));
2685 else
2686 memcpy(&cp.lap, giac, sizeof(cp.lap));
2687
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002688 cp.length = length;
2689
2690 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2691
2692 return 0;
2693}
2694
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002695static void le_scan_disable_work(struct work_struct *work)
2696{
2697 struct hci_dev *hdev = container_of(work, struct hci_dev,
2698 le_scan_disable.work);
2699 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002700
2701 BT_DBG("%s", hdev->name);
2702
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002703 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002704 return;
2705
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002706 cancel_delayed_work(&hdev->le_scan_restart);
2707
2708 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2709 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002710 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2711 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002712 return;
2713 }
2714
2715 hdev->discovery.scan_start = 0;
2716
 2717	/* If we were running an LE-only scan, change the discovery state. If
 2718	 * we were running both LE and BR/EDR inquiry simultaneously,
 2719	 * and BR/EDR inquiry is already finished, stop discovery,
 2720	 * otherwise BR/EDR inquiry will stop discovery when finished.
 2721	 * If we are going to resolve a remote device name, do not change
 2722	 * the discovery state.
2723 */
2724
2725 if (hdev->discovery.type == DISCOV_TYPE_LE)
2726 goto discov_stopped;
2727
2728 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2729 return;
2730
2731 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2732 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2733 hdev->discovery.state != DISCOVERY_RESOLVING)
2734 goto discov_stopped;
2735
2736 return;
2737 }
2738
2739 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2740 HCI_CMD_TIMEOUT, &status);
2741 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002742 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002743 goto discov_stopped;
2744 }
2745
2746 return;
2747
2748discov_stopped:
2749 hci_dev_lock(hdev);
2750 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2751 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002752}
2753
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002754static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002755{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002756 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002757
2758 /* If controller is not scanning we are done. */
2759 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2760 return 0;
2761
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002762 if (hdev->scanning_paused) {
2763 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2764 return 0;
2765 }
2766
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002767 hci_req_add_le_scan_disable(req);
2768
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302769 if (use_ext_scan(hdev)) {
2770 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2771
2772 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2773 ext_enable_cp.enable = LE_SCAN_ENABLE;
2774 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2775
2776 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2777 sizeof(ext_enable_cp), &ext_enable_cp);
2778 } else {
2779 struct hci_cp_le_set_scan_enable cp;
2780
2781 memset(&cp, 0, sizeof(cp));
2782 cp.enable = LE_SCAN_ENABLE;
2783 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2784 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2785 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002786
2787 return 0;
2788}
2789
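/* Worker that restarts LE scanning (used with controllers that have a strict
 * duplicate filter). After the restart it re-arms le_scan_disable with the
 * time left from the original scan window; e.g. (hypothetical numbers) with a
 * 10240 ms duration of which 4000 ms have already elapsed, the disable work
 * is queued again with a 6240 ms timeout, so the overall scan does not run
 * longer than intended.
 */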
2790static void le_scan_restart_work(struct work_struct *work)
2791{
2792 struct hci_dev *hdev = container_of(work, struct hci_dev,
2793 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002794 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002795 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002796
2797 BT_DBG("%s", hdev->name);
2798
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002799 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002800 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002801 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2802 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002803 return;
2804 }
2805
2806 hci_dev_lock(hdev);
2807
2808 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2809 !hdev->discovery.scan_start)
2810 goto unlock;
2811
 2812	/* When the scan was started, hdev->le_scan_disable was queued to run
 2813	 * 'duration' after scan_start. During the scan restart this work has
 2814	 * been canceled, so we need to queue it again with the remaining
 2815	 * timeout, to make sure that the scan does not run indefinitely.
2816 */
2817 duration = hdev->discovery.scan_duration;
2818 scan_start = hdev->discovery.scan_start;
2819 now = jiffies;
2820 if (now - scan_start <= duration) {
2821 int elapsed;
2822
2823 if (now >= scan_start)
2824 elapsed = now - scan_start;
2825 else
2826 elapsed = ULONG_MAX - scan_start + now;
2827
2828 timeout = duration - elapsed;
2829 } else {
2830 timeout = 0;
2831 }
2832
2833 queue_delayed_work(hdev->req_workqueue,
2834 &hdev->le_scan_disable, timeout);
2835
2836unlock:
2837 hci_dev_unlock(hdev);
2838}
2839
Johan Hedberge68f0722015-11-11 08:30:30 +02002840static int active_scan(struct hci_request *req, unsigned long opt)
2841{
2842 uint16_t interval = opt;
2843 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002844 u8 own_addr_type;
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002845 /* White list is not used for discovery */
2846 u8 filter_policy = 0x00;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302847 /* Discovery doesn't require controller address resolution */
2848 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02002849 int err;
2850
2851 BT_DBG("%s", hdev->name);
2852
Johan Hedberge68f0722015-11-11 08:30:30 +02002853 /* If controller is scanning, it means the background scanning is
2854 * running. Thus, we should temporarily stop it in order to set the
2855 * discovery scanning parameters.
2856 */
2857 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2858 hci_req_add_le_scan_disable(req);
2859
2860 /* All active scans will be done with either a resolvable private
2861 * address (when privacy feature has been enabled) or non-resolvable
2862 * private address.
2863 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002864 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2865 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002866 if (err < 0)
2867 own_addr_type = ADDR_LE_DEV_PUBLIC;
2868
Alain Michaudd4edda02020-06-29 17:04:15 +00002869 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2870 hdev->le_scan_window_discovery, own_addr_type,
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302871 filter_policy, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02002872 return 0;
2873}
2874
2875static int interleaved_discov(struct hci_request *req, unsigned long opt)
2876{
2877 int err;
2878
2879 BT_DBG("%s", req->hdev->name);
2880
2881 err = active_scan(req, opt);
2882 if (err)
2883 return err;
2884
Johan Hedberg7df26b52015-11-11 12:24:21 +02002885 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002886}
2887
2888static void start_discovery(struct hci_dev *hdev, u8 *status)
2889{
2890 unsigned long timeout;
2891
2892 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2893
2894 switch (hdev->discovery.type) {
2895 case DISCOV_TYPE_BREDR:
2896 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002897 hci_req_sync(hdev, bredr_inquiry,
2898 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002899 status);
2900 return;
2901 case DISCOV_TYPE_INTERLEAVED:
2902 /* When running simultaneous discovery, the LE scanning time
 2903	 * should occupy the whole discovery time since BR/EDR inquiry
 2904	 * and LE scanning are scheduled by the controller.
 2905	 *
 2906	 * For interleaved discovery, in comparison, BR/EDR inquiry
2907 * and LE scanning are done sequentially with separate
2908 * timeouts.
2909 */
2910 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2911 &hdev->quirks)) {
2912 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2913 /* During simultaneous discovery, we double LE scan
2914 * interval. We must leave some time for the controller
2915 * to do BR/EDR inquiry.
2916 */
2917 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00002918 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002919 status);
2920 break;
2921 }
2922
2923 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00002924 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002925 HCI_CMD_TIMEOUT, status);
2926 break;
2927 case DISCOV_TYPE_LE:
2928 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00002929 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002930 HCI_CMD_TIMEOUT, status);
2931 break;
2932 default:
2933 *status = HCI_ERROR_UNSPECIFIED;
2934 return;
2935 }
2936
2937 if (*status)
2938 return;
2939
2940 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2941
2942 /* When service discovery is used and the controller has a
2943 * strict duplicate filter, it is important to remember the
2944 * start and duration of the scan. This is required for
2945 * restarting scanning during the discovery phase.
2946 */
2947 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2948 hdev->discovery.result_filtering) {
2949 hdev->discovery.scan_start = jiffies;
2950 hdev->discovery.scan_duration = timeout;
2951 }
2952
2953 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2954 timeout);
2955}
2956
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002957bool hci_req_stop_discovery(struct hci_request *req)
2958{
2959 struct hci_dev *hdev = req->hdev;
2960 struct discovery_state *d = &hdev->discovery;
2961 struct hci_cp_remote_name_req_cancel cp;
2962 struct inquiry_entry *e;
2963 bool ret = false;
2964
2965 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2966
2967 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2968 if (test_bit(HCI_INQUIRY, &hdev->flags))
2969 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2970
2971 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2972 cancel_delayed_work(&hdev->le_scan_disable);
2973 hci_req_add_le_scan_disable(req);
2974 }
2975
2976 ret = true;
2977 } else {
2978 /* Passive scanning */
2979 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2980 hci_req_add_le_scan_disable(req);
2981 ret = true;
2982 }
2983 }
2984
2985 /* No further actions needed for LE-only discovery */
2986 if (d->type == DISCOV_TYPE_LE)
2987 return ret;
2988
2989 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2990 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2991 NAME_PENDING);
2992 if (!e)
2993 return ret;
2994
2995 bacpy(&cp.bdaddr, &e->data.bdaddr);
2996 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2997 &cp);
2998 ret = true;
2999 }
3000
3001 return ret;
3002}
3003
3004static int stop_discovery(struct hci_request *req, unsigned long opt)
3005{
3006 hci_dev_lock(req->hdev);
3007 hci_req_stop_discovery(req);
3008 hci_dev_unlock(req->hdev);
3009
3010 return 0;
3011}
3012
Johan Hedberge68f0722015-11-11 08:30:30 +02003013static void discov_update(struct work_struct *work)
3014{
3015 struct hci_dev *hdev = container_of(work, struct hci_dev,
3016 discov_update);
3017 u8 status = 0;
3018
3019 switch (hdev->discovery.state) {
3020 case DISCOVERY_STARTING:
3021 start_discovery(hdev, &status);
3022 mgmt_start_discovery_complete(hdev, status);
3023 if (status)
3024 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3025 else
3026 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3027 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02003028 case DISCOVERY_STOPPING:
3029 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3030 mgmt_stop_discovery_complete(hdev, status);
3031 if (!status)
3032 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3033 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02003034 case DISCOVERY_STOPPED:
3035 default:
3036 return;
3037 }
3038}
3039
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that the
	 * limited discoverable flag is cleared. Even when the timeout was
	 * triggered from general discoverable mode, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

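/* Request callback run through __hci_req_hci_power_on(). It brings a
 * freshly powered-on controller in line with the host configuration:
 * SSP/Secure Connections support, LE host support, default advertising
 * data, link security and the BR/EDR scan, class, name and EIR settings.
 */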
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

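	/* On a dual-mode controller with LE enabled, make sure the LE Host
	 * Supported feature is set (simultaneous LE and BR/EDR stays
	 * disabled). The command is only issued when the current host
	 * features do not already match.
	 */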
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

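	/* Advertising setup: program the default instance (0x00) using
	 * extended advertising commands when the controller supports them,
	 * or legacy advertising data otherwise. When advertising instances
	 * already exist (and the HCI_ADVERTISING flag is not set), the
	 * first instance is scheduled instead.
	 */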
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

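	/* Sync the Authentication Enable parameter with the host's link
	 * security setting, but only when it differs from the controller's
	 * current state.
	 */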
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

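/* Power-on hook: register SMP and push the current host configuration to
 * the controller via powered_update_hci(). The caller is assumed to be
 * the power-on path in hci_core.c (not visible in this file).
 */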
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

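/* Wire up the work items owned by this file; presumably invoked once
 * while the hci_dev is being set up, before any of this work can be
 * queued.
 */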
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

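/* Counterpart to hci_request_setup(): cancel any pending synchronous
 * request and all queued work, typically while the device is being
 * powered down or unregistered.
 */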
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}