/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
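
/* Typical usage (illustrative note, based on interleave_scan_work()
 * later in this file): the caller passes a request-builder function and
 * a timeout, e.g.
 *
 *	hci_req_sync(hdev, build_fn, 0, HCI_CMD_TIMEOUT, &status);
 *
 * where build_fn queues the HCI commands and status receives the HCI
 * error code of the completed request.
 */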

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
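
/* Example request flow built from the helpers above (illustrative
 * sketch only; the opcode and parameter are placeholders):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 *	err = hci_req_run(&req, complete_cb);
 */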

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
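		/* Informational note: page scan timing values are in
		 * 0.625 ms units, so 0x0100 * 0.625 ms = 160 ms.
		 */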
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleaved scan was started by this function,
 * false otherwise.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
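
/* Informational note: the delayed work scheduled above drives a simple
 * state machine that alternates between INTERLEAVE_SCAN_ALLOWLIST and
 * INTERLEAVE_SCAN_NO_FILTER; see interleave_scan_work() later in this
 * file for the transitions.
 */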

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

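/* Rebuild the controller accept list from the pending LE connection and
 * report lists and return the scan filter policy to use: 0x00 when the
 * accept list cannot be used (scan without filtering), 0x01 when it can
 * (summary of the logic below, added as an informational note).
 */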
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

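/* Queue the commands that configure and enable LE scanning, using the
 * extended scan commands when the controller supports them. interval and
 * window are in 0.625 ms units; filter_dup selects duplicate filtering
 * and addr_resolv controls whether controller based address resolution
 * is enabled first (descriptive note added for clarity).
 */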
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
702
Alain Michaud9a9373f2020-07-31 01:05:34 +0000703/* Returns true if an le connection is in the scanning state */
704static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
705{
706 struct hci_conn_hash *h = &hdev->conn_hash;
707 struct hci_conn *c;
708
709 rcu_read_lock();
710
711 list_for_each_entry_rcu(c, &h->list, list) {
712 if (c->type == LE_LINK && c->state == BT_CONNECT &&
713 test_bit(HCI_CONN_SCANNING, &c->flags)) {
714 rcu_read_unlock();
715 return true;
716 }
717 }
718
719 rcu_read_unlock();
720
721 return false;
722}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		adv_min_interval = adv->min_interval;
		adv_max_interval = adv->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (adv_cur_instance_is_scannable(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_scan_rsp(hdev, instance, pdu.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_scan_rsp(hdev, instance, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_adv_data(hdev, instance, pdu.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
1279
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301280int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1281 bool use_rpa, struct adv_info *adv_instance,
1282 u8 *own_addr_type, bdaddr_t *rand_addr)
1283{
1284 int err;
1285
1286 bacpy(rand_addr, BDADDR_ANY);
1287
1288 /* If privacy is enabled use a resolvable private address. If
1289 * current RPA has expired then generate a new one.
1290 */
1291 if (use_rpa) {
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301292 /* If Controller supports LL Privacy use own address type is
1293 * 0x03
1294 */
Luiz Augusto von Dentzad383c22021-10-27 16:58:42 -07001295 if (use_ll_privacy(hdev))
Sathish Narasimmanc0ee0642020-09-25 18:02:15 +05301296 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1297 else
1298 *own_addr_type = ADDR_LE_DEV_RANDOM;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301299
1300 if (adv_instance) {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001301 if (adv_rpa_valid(adv_instance))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301302 return 0;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301303 } else {
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001304 if (rpa_valid(hdev))
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301305 return 0;
1306 }
1307
1308 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1309 if (err < 0) {
Marcel Holtmann00b383b2020-03-09 22:48:10 +01001310 bt_dev_err(hdev, "failed to generate new RPA");
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301311 return err;
1312 }
1313
1314 bacpy(rand_addr, &hdev->rpa);
1315
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301316 return 0;
1317 }
1318
1319 /* In case of required privacy without resolvable private address,
1320 * use an non-resolvable private address. This is useful for
1321 * non-connectable advertising.
1322 */
1323 if (require_privacy) {
1324 bdaddr_t nrpa;
1325
1326 while (true) {
1327 /* The non-resolvable private address is generated
1328 * from six random bytes with the two most significant
1329 * bits cleared.
1330 */
1331 get_random_bytes(&nrpa, 6);
1332 nrpa.b[5] &= 0x3f;
1333
1334 /* The non-resolvable private address shall not be
1335 * equal to the public address.
1336 */
1337 if (bacmp(&hdev->bdaddr, &nrpa))
1338 break;
1339 }
1340
1341 *own_addr_type = ADDR_LE_DEV_RANDOM;
1342 bacpy(rand_addr, &nrpa);
1343
1344 return 0;
1345 }
1346
1347 /* No privacy so use a public address. */
1348 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1349
1350 return 0;
1351}
1352
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301353void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1354{
1355 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1356}
1357
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001358static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1359{
1360 struct hci_dev *hdev = req->hdev;
1361
1362 /* If we're advertising or initiating an LE connection we can't
1363 * go ahead and change the random address at this time. This is
1364 * because the eventual initiator address used for the
1365 * subsequently created connection will be undefined (some
1366 * controllers use the new address and others the one we had
1367 * when the operation started).
1368 *
1369 * In this kind of scenario skip the update and let the random
1370 * address be updated at the next cycle.
1371 */
1372 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1373 hci_lookup_le_connect(hdev)) {
1374 bt_dev_dbg(hdev, "Deferring random address update");
1375 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1376 return;
1377 }
1378
1379 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1380}
1381
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301382int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301383{
1384 struct hci_cp_le_set_ext_adv_params cp;
1385 struct hci_dev *hdev = req->hdev;
1386 bool connectable;
1387 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301388 bdaddr_t random_addr;
1389 u8 own_addr_type;
1390 int err;
1391 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301392 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301393
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301394 if (instance > 0) {
1395 adv_instance = hci_find_adv_instance(hdev, instance);
1396 if (!adv_instance)
1397 return -EINVAL;
1398 } else {
1399 adv_instance = NULL;
1400 }
1401
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001402 flags = hci_adv_instance_flags(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301403
1404 /* If the "connectable" instance flag was not set, then choose between
1405 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1406 */
1407 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1408 mgmt_get_connectable(hdev);
1409
Colin Ian King75edd1f2018-11-09 13:27:36 +00001410 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301411 return -EPERM;
1412
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301413 /* Set require_privacy to true only when non-connectable
1414 * advertising is used. In that case it is fine to use a
1415 * non-resolvable private address.
1416 */
1417 err = hci_get_random_address(hdev, !connectable,
1418 adv_use_rpa(hdev, flags), adv_instance,
1419 &own_addr_type, &random_addr);
1420 if (err < 0)
1421 return err;
1422
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301423 memset(&cp, 0, sizeof(cp));
1424
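/* Use the per-instance advertising interval and TX power when an instance
 * was supplied, otherwise fall back to the controller defaults.
 */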
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001425 if (adv_instance) {
1426 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1427 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1428 cp.tx_power = adv_instance->tx_power;
1429 } else {
1430 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1431 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1432 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1433 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301434
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301435 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1436
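/* Pick the advertising event type: connectable, scannable or
 * non-connectable, using extended PDUs only when a secondary advertising
 * PHY was requested.
 */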
1437 if (connectable) {
1438 if (secondary_adv)
1439 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1440 else
1441 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001442 } else if (hci_adv_instance_is_scannable(hdev, instance) ||
Daniel Winklerff02db12021-03-03 11:15:23 -08001443 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301444 if (secondary_adv)
1445 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1446 else
1447 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1448 } else {
1449 if (secondary_adv)
1450 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1451 else
1452 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1453 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301454
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301455 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301456 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001457 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301458
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301459 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1460 cp.primary_phy = HCI_ADV_PHY_1M;
1461 cp.secondary_phy = HCI_ADV_PHY_2M;
1462 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1463 cp.primary_phy = HCI_ADV_PHY_CODED;
1464 cp.secondary_phy = HCI_ADV_PHY_CODED;
1465 } else {
1466 /* In all other cases use 1M */
1467 cp.primary_phy = HCI_ADV_PHY_1M;
1468 cp.secondary_phy = HCI_ADV_PHY_1M;
1469 }
1470
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301471 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1472
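/* Each extended advertising set carries its own random address, so program
 * it only when a random own address is in use and it differs from what the
 * controller already has.
 */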
Luiz Augusto von Dentzcf75ad82021-10-27 16:58:44 -07001473 if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1474 own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301475 bacmp(&random_addr, BDADDR_ANY)) {
1476 struct hci_cp_le_set_adv_set_rand_addr cp;
1477
1478 /* Check if the random address needs to be updated */
1479 if (adv_instance) {
1480 if (!bacmp(&random_addr, &adv_instance->random_addr))
1481 return 0;
1482 } else {
1483 if (!bacmp(&random_addr, &hdev->random_addr))
1484 return 0;
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001485 /* Instance 0x00 doesn't have an adv_info, instead it
1486 * uses hdev->random_addr to track its address so
1487 * whenever it needs to be updated this also sets the
1488 * random address, since hdev->random_addr is shared with
1489 * the scan state machine.
1490 */
1491 set_random_addr(req, &random_addr);
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301492 }
1493
1494 memset(&cp, 0, sizeof(cp));
1495
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001496 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301497 bacpy(&cp.bdaddr, &random_addr);
1498
1499 hci_req_add(req,
1500 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1501 sizeof(cp), &cp);
1502 }
1503
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301504 return 0;
1505}
1506
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001507int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301508{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001509 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301510 struct hci_cp_le_set_ext_adv_enable *cp;
1511 struct hci_cp_ext_adv_set *adv_set;
1512 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001513 struct adv_info *adv_instance;
1514
1515 if (instance > 0) {
1516 adv_instance = hci_find_adv_instance(hdev, instance);
1517 if (!adv_instance)
1518 return -EINVAL;
1519 } else {
1520 adv_instance = NULL;
1521 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301522
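/* The enable command carries a single advertising set entry immediately
 * after the header in the data buffer.
 */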
1523 cp = (void *) data;
1524 adv_set = (void *) cp->data;
1525
1526 memset(cp, 0, sizeof(*cp));
1527
1528 cp->enable = 0x01;
1529 cp->num_of_sets = 0x01;
1530
1531 memset(adv_set, 0, sizeof(*adv_set));
1532
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001533 adv_set->handle = instance;
1534
1535 /* Set duration per instance since the controller is responsible for
1536 * scheduling it.
1537 */
1538 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001539 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001540
1541 /* Time = N * 10 ms */
1542 adv_set->duration = cpu_to_le16(duration / 10);
1543 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301544
1545 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1546 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1547 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001548
1549 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301550}
1551
Daniel Winkler37adf702020-07-14 14:16:00 -07001552int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1553{
1554 struct hci_dev *hdev = req->hdev;
1555 struct hci_cp_le_set_ext_adv_enable *cp;
1556 struct hci_cp_ext_adv_set *adv_set;
1557 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1558 u8 req_size;
1559
1560 /* If request specifies an instance that doesn't exist, fail */
1561 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1562 return -EINVAL;
1563
1564 memset(data, 0, sizeof(data));
1565
1566 cp = (void *)data;
1567 adv_set = (void *)cp->data;
1568
1569 /* Instance 0x00 indicates all advertising instances will be disabled */
1570 cp->num_of_sets = !!instance;
1571 cp->enable = 0x00;
1572
1573 adv_set->handle = instance;
1574
1575 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1576 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1577
1578 return 0;
1579}
1580
1581int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1582{
1583 struct hci_dev *hdev = req->hdev;
1584
1585 /* If request specifies an instance that doesn't exist, fail */
1586 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1587 return -EINVAL;
1588
1589 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1590
1591 return 0;
1592}
1593
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301594int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1595{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301596 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07001597 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301598 int err;
1599
Daniel Winkler37adf702020-07-14 14:16:00 -07001600 /* If instance isn't pending, the chip knows about it, and it's safe to
1601 * disable it
1602 */
1603 if (adv_instance && !adv_instance->pending)
1604 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301605
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301606 err = __hci_req_setup_ext_adv_instance(req, instance);
1607 if (err < 0)
1608 return err;
1609
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301610 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001611 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301612
1613 return 0;
1614}
1615
Johan Hedbergf2252572015-11-18 12:49:20 +02001616int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1617 bool force)
1618{
1619 struct hci_dev *hdev = req->hdev;
1620 struct adv_info *adv_instance = NULL;
1621 u16 timeout;
1622
1623 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001624 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001625 return -EPERM;
1626
1627 if (hdev->adv_instance_timeout)
1628 return -EBUSY;
1629
1630 adv_instance = hci_find_adv_instance(hdev, instance);
1631 if (!adv_instance)
1632 return -ENOENT;
1633
1634 /* A zero timeout means unlimited advertising. As long as there is
1635 * only one instance, duration should be ignored. We still set a timeout
1636 * in case further instances are being added later on.
1637 *
1638 * If the remaining lifetime of the instance is more than the duration
1639 * then the timeout corresponds to the duration, otherwise it will be
1640 * reduced to the remaining instance lifetime.
1641 */
1642 if (adv_instance->timeout == 0 ||
1643 adv_instance->duration <= adv_instance->remaining_time)
1644 timeout = adv_instance->duration;
1645 else
1646 timeout = adv_instance->remaining_time;
1647
1648 /* The remaining time is being reduced unless the instance is being
1649 * advertised without time limit.
1650 */
1651 if (adv_instance->timeout)
1652 adv_instance->remaining_time =
1653 adv_instance->remaining_time - timeout;
1654
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001655 /* Only use work for scheduling instances with legacy advertising */
1656 if (!ext_adv_capable(hdev)) {
1657 hdev->adv_instance_timeout = timeout;
1658 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02001659 &hdev->adv_instance_expire,
1660 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001661 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001662
1663 /* If we're just re-scheduling the same instance again then do not
1664 * execute any HCI commands. This happens when a single instance is
1665 * being advertised.
1666 */
1667 if (!force && hdev->cur_adv_instance == instance &&
1668 hci_dev_test_flag(hdev, HCI_LE_ADV))
1669 return 0;
1670
1671 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301672 if (ext_adv_capable(hdev)) {
1673 __hci_req_start_ext_adv(req, instance);
1674 } else {
1675 __hci_req_update_adv_data(req, instance);
1676 __hci_req_update_scan_rsp_data(req, instance);
1677 __hci_req_enable_advertising(req);
1678 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001679
1680 return 0;
1681}
1682
Johan Hedbergf2252572015-11-18 12:49:20 +02001683/* For a single instance:
1684 * - force == true: The instance will be removed even when its remaining
1685 * lifetime is not zero.
1686 * - force == false: the instance will be deactivated but kept stored unless
1687 * the remaining lifetime is zero.
1688 *
1689 * For instance == 0x00:
1690 * - force == true: All instances will be removed regardless of their timeout
1691 * setting.
1692 * - force == false: Only instances that have a timeout will be removed.
1693 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001694void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1695 struct hci_request *req, u8 instance,
1696 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02001697{
1698 struct adv_info *adv_instance, *n, *next_instance = NULL;
1699 int err;
1700 u8 rem_inst;
1701
1702 /* Cancel any timeout concerning the removed instance(s). */
1703 if (!instance || hdev->cur_adv_instance == instance)
1704 cancel_adv_timeout(hdev);
1705
1706 /* Get the next instance to advertise BEFORE we remove
1707 * the current one. This can be the same instance again
1708 * if there is only one instance.
1709 */
1710 if (instance && hdev->cur_adv_instance == instance)
1711 next_instance = hci_get_next_instance(hdev, instance);
1712
1713 if (instance == 0x00) {
1714 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1715 list) {
1716 if (!(force || adv_instance->timeout))
1717 continue;
1718
1719 rem_inst = adv_instance->instance;
1720 err = hci_remove_adv_instance(hdev, rem_inst);
1721 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001722 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02001723 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001724 } else {
1725 adv_instance = hci_find_adv_instance(hdev, instance);
1726
1727 if (force || (adv_instance && adv_instance->timeout &&
1728 !adv_instance->remaining_time)) {
1729 /* Don't advertise a removed instance. */
1730 if (next_instance &&
1731 next_instance->instance == instance)
1732 next_instance = NULL;
1733
1734 err = hci_remove_adv_instance(hdev, instance);
1735 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001736 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001737 }
1738 }
1739
Johan Hedbergf2252572015-11-18 12:49:20 +02001740 if (!req || !hdev_is_powered(hdev) ||
1741 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1742 return;
1743
Daniel Winkler37adf702020-07-14 14:16:00 -07001744 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02001745 __hci_req_schedule_adv_instance(req, next_instance->instance,
1746 false);
1747}
1748
Johan Hedberg0857dd32014-12-19 13:40:20 +02001749int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001750 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001751{
1752 struct hci_dev *hdev = req->hdev;
1753 int err;
1754
1755 /* If privacy is enabled use a resolvable private address. If
1756 * the current RPA has expired or something other than the
1757 * current RPA is in use, then generate a new one.
1758 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001759 if (use_rpa) {
Sathish Narasimmand03c7592020-07-23 18:09:00 +05301760 /* If the controller supports LL Privacy, use own address type
1761 * 0x03 (RPA generated and resolved by the controller)
1762 */
Luiz Augusto von Dentzad383c22021-10-27 16:58:42 -07001763 if (use_ll_privacy(hdev))
Sathish Narasimmand03c7592020-07-23 18:09:00 +05301764 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1765 else
1766 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02001767
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001768 if (rpa_valid(hdev))
Johan Hedberg0857dd32014-12-19 13:40:20 +02001769 return 0;
1770
1771 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1772 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001773 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001774 return err;
1775 }
1776
1777 set_random_addr(req, &hdev->rpa);
1778
Johan Hedberg0857dd32014-12-19 13:40:20 +02001779 return 0;
1780 }
1781
1782 /* In case of required privacy without resolvable private address,
1783 * use a non-resolvable private address. This is useful for active
1784 * scanning and non-connectable advertising.
1785 */
1786 if (require_privacy) {
1787 bdaddr_t nrpa;
1788
1789 while (true) {
1790 /* The non-resolvable private address is generated
1791 * from six random bytes with the two most significant
1792 * bits cleared.
1793 */
1794 get_random_bytes(&nrpa, 6);
1795 nrpa.b[5] &= 0x3f;
1796
1797 /* The non-resolvable private address shall not be
1798 * equal to the public address.
1799 */
1800 if (bacmp(&hdev->bdaddr, &nrpa))
1801 break;
1802 }
1803
1804 *own_addr_type = ADDR_LE_DEV_RANDOM;
1805 set_random_addr(req, &nrpa);
1806 return 0;
1807 }
1808
1809 /* If forcing static address is in use or there is no public
1810 * address use the static address as random address (but skip
1811 * the HCI command if the current random address is already the
1812 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001813 *
1814 * In case BR/EDR has been disabled on a dual-mode controller
1815 * and a static address has been configured, then use that
1816 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001817 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001818 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001819 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001820 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001821 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001822 *own_addr_type = ADDR_LE_DEV_RANDOM;
1823 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1824 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1825 &hdev->static_addr);
1826 return 0;
1827 }
1828
1829 /* Neither privacy nor static address is being used so use a
1830 * public address.
1831 */
1832 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1833
1834 return 0;
1835}
Johan Hedberg2cf22212014-12-19 22:26:00 +02001836
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08001837static bool disconnected_accept_list_entries(struct hci_dev *hdev)
Johan Hedberg405a2612014-12-19 23:18:22 +02001838{
1839 struct bdaddr_list *b;
1840
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08001841 list_for_each_entry(b, &hdev->accept_list, list) {
Johan Hedberg405a2612014-12-19 23:18:22 +02001842 struct hci_conn *conn;
1843
1844 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1845 if (!conn)
1846 return true;
1847
1848 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1849 return true;
1850 }
1851
1852 return false;
1853}
1854
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001855void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001856{
1857 struct hci_dev *hdev = req->hdev;
1858 u8 scan;
1859
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001860 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001861 return;
1862
1863 if (!hdev_is_powered(hdev))
1864 return;
1865
1866 if (mgmt_powering_down(hdev))
1867 return;
1868
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001869 if (hdev->scanning_paused)
1870 return;
1871
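/* Keep page scan enabled while we are connectable or while an accept list
 * entry still lacks an active connection; inquiry scan is added on top
 * when we are discoverable.
 */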
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001872 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08001873 disconnected_accept_list_entries(hdev))
Johan Hedberg405a2612014-12-19 23:18:22 +02001874 scan = SCAN_PAGE;
1875 else
1876 scan = SCAN_DISABLED;
1877
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001878 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001879 scan |= SCAN_INQUIRY;
1880
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001881 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1882 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1883 return;
1884
Johan Hedberg405a2612014-12-19 23:18:22 +02001885 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1886}
1887
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001888static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001889{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001890 hci_dev_lock(req->hdev);
1891 __hci_req_update_scan(req);
1892 hci_dev_unlock(req->hdev);
1893 return 0;
1894}
Johan Hedberg405a2612014-12-19 23:18:22 +02001895
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001896static void scan_update_work(struct work_struct *work)
1897{
1898 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1899
1900 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001901}
1902
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001903static u8 get_service_classes(struct hci_dev *hdev)
1904{
1905 struct bt_uuid *uuid;
1906 u8 val = 0;
1907
1908 list_for_each_entry(uuid, &hdev->uuids, list)
1909 val |= uuid->svc_hint;
1910
1911 return val;
1912}
1913
1914void __hci_req_update_class(struct hci_request *req)
1915{
1916 struct hci_dev *hdev = req->hdev;
1917 u8 cod[3];
1918
Howard Chung22fbcfc2020-11-11 15:02:19 +08001919 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001920
1921 if (!hdev_is_powered(hdev))
1922 return;
1923
1924 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1925 return;
1926
1927 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1928 return;
1929
1930 cod[0] = hdev->minor_class;
1931 cod[1] = hdev->major_class;
1932 cod[2] = get_service_classes(hdev);
1933
1934 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1935 cod[1] |= 0x20;
1936
1937 if (memcmp(cod, hdev->dev_class, 3) == 0)
1938 return;
1939
1940 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1941}
1942
Johan Hedbergaed1a882015-11-22 17:24:44 +03001943static void write_iac(struct hci_request *req)
1944{
1945 struct hci_dev *hdev = req->hdev;
1946 struct hci_cp_write_current_iac_lap cp;
1947
1948 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1949 return;
1950
1951 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1952 /* Limited discoverable mode */
1953 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1954 cp.iac_lap[0] = 0x00; /* LIAC */
1955 cp.iac_lap[1] = 0x8b;
1956 cp.iac_lap[2] = 0x9e;
1957 cp.iac_lap[3] = 0x33; /* GIAC */
1958 cp.iac_lap[4] = 0x8b;
1959 cp.iac_lap[5] = 0x9e;
1960 } else {
1961 /* General discoverable mode */
1962 cp.num_iac = 1;
1963 cp.iac_lap[0] = 0x33; /* GIAC */
1964 cp.iac_lap[1] = 0x8b;
1965 cp.iac_lap[2] = 0x9e;
1966 }
1967
1968 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1969 (cp.num_iac * 3) + 1, &cp);
1970}
1971
1972static int discoverable_update(struct hci_request *req, unsigned long opt)
1973{
1974 struct hci_dev *hdev = req->hdev;
1975
1976 hci_dev_lock(hdev);
1977
1978 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1979 write_iac(req);
1980 __hci_req_update_scan(req);
1981 __hci_req_update_class(req);
1982 }
1983
1984 /* Advertising instances don't use the global discoverable setting, so
1985 * only update AD if advertising was enabled using Set Advertising.
1986 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001987 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001988 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001989
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001990 /* Discoverable mode affects the local advertising
1991 * address in limited privacy mode.
1992 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301993 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
1994 if (ext_adv_capable(hdev))
1995 __hci_req_start_ext_adv(req, 0x00);
1996 else
1997 __hci_req_enable_advertising(req);
1998 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001999 }
2000
Johan Hedbergaed1a882015-11-22 17:24:44 +03002001 hci_dev_unlock(hdev);
2002
2003 return 0;
2004}
2005
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002006void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2007 u8 reason)
2008{
2009 switch (conn->state) {
2010 case BT_CONNECTED:
2011 case BT_CONFIG:
2012 if (conn->type == AMP_LINK) {
2013 struct hci_cp_disconn_phy_link cp;
2014
2015 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2016 cp.reason = reason;
2017 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2018 &cp);
2019 } else {
2020 struct hci_cp_disconnect dc;
2021
2022 dc.handle = cpu_to_le16(conn->handle);
2023 dc.reason = reason;
2024 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2025 }
2026
2027 conn->state = BT_DISCONN;
2028
2029 break;
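/* The connection is still being established: cancel the pending create
 * connection attempt instead of sending a disconnect.
 */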
2030 case BT_CONNECT:
2031 if (conn->type == LE_LINK) {
2032 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2033 break;
2034 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2035 0, NULL);
2036 } else if (conn->type == ACL_LINK) {
2037 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2038 break;
2039 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2040 6, &conn->dst);
2041 }
2042 break;
2043 case BT_CONNECT2:
2044 if (conn->type == ACL_LINK) {
2045 struct hci_cp_reject_conn_req rej;
2046
2047 bacpy(&rej.bdaddr, &conn->dst);
2048 rej.reason = reason;
2049
2050 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2051 sizeof(rej), &rej);
2052 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2053 struct hci_cp_reject_sync_conn_req rej;
2054
2055 bacpy(&rej.bdaddr, &conn->dst);
2056
2057 /* SCO rejection has its own limited set of
2058 * allowed error values (0x0D-0x0F) which isn't
2059 * compatible with most values passed to this
2060 * function. To be safe hard-code one of the
2061 * values that's suitable for SCO.
2062 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002063 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002064
2065 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2066 sizeof(rej), &rej);
2067 }
2068 break;
2069 default:
2070 conn->state = BT_CLOSED;
2071 break;
2072 }
2073}
2074
2075static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2076{
2077 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002078 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002079}
2080
2081int hci_abort_conn(struct hci_conn *conn, u8 reason)
2082{
2083 struct hci_request req;
2084 int err;
2085
2086 hci_req_init(&req, conn->hdev);
2087
2088 __hci_abort_conn(&req, conn, reason);
2089
2090 err = hci_req_run(&req, abort_conn_complete);
2091 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002092 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002093 return err;
2094 }
2095
2096 return 0;
2097}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002098
Johan Hedberga1d01db2015-11-11 08:11:25 +02002099static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002100{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302101 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002102 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002103}
2104
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002105static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2106{
2107 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002108 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2109 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002110 struct hci_cp_inquiry cp;
2111
Archie Pusaka06752d12021-04-01 11:11:33 +08002112 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2113 return 0;
2114
Howard Chung22fbcfc2020-11-11 15:02:19 +08002115 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002116
2117 hci_dev_lock(req->hdev);
2118 hci_inquiry_cache_flush(req->hdev);
2119 hci_dev_unlock(req->hdev);
2120
2121 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002122
2123 if (req->hdev->discovery.limited)
2124 memcpy(&cp.lap, liac, sizeof(cp.lap));
2125 else
2126 memcpy(&cp.lap, giac, sizeof(cp.lap));
2127
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002128 cp.length = length;
2129
2130 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2131
2132 return 0;
2133}
2134
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002135static void le_scan_disable_work(struct work_struct *work)
2136{
2137 struct hci_dev *hdev = container_of(work, struct hci_dev,
2138 le_scan_disable.work);
2139 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002140
Howard Chung22fbcfc2020-11-11 15:02:19 +08002141 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002142
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002143 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002144 return;
2145
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002146 cancel_delayed_work(&hdev->le_scan_restart);
2147
2148 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2149 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002150 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2151 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002152 return;
2153 }
2154
2155 hdev->discovery.scan_start = 0;
2156
2157 /* If we were running an LE-only scan, change the discovery state.
2158 * If we were running both LE and BR/EDR inquiry simultaneously,
2159 * and BR/EDR inquiry has already finished, stop discovery;
2160 * otherwise BR/EDR inquiry will stop discovery when it finishes.
2161 * If we are about to resolve a remote device name, do not change
2162 * the discovery state.
2163 */
2164
2165 if (hdev->discovery.type == DISCOV_TYPE_LE)
2166 goto discov_stopped;
2167
2168 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2169 return;
2170
2171 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2172 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2173 hdev->discovery.state != DISCOVERY_RESOLVING)
2174 goto discov_stopped;
2175
2176 return;
2177 }
2178
2179 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2180 HCI_CMD_TIMEOUT, &status);
2181 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002182 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002183 goto discov_stopped;
2184 }
2185
2186 return;
2187
2188discov_stopped:
2189 hci_dev_lock(hdev);
2190 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2191 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002192}
2193
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002194static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002195{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002196 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002197
2198 /* If controller is not scanning we are done. */
2199 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2200 return 0;
2201
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002202 if (hdev->scanning_paused) {
2203 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2204 return 0;
2205 }
2206
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302207 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002208
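/* Re-enable scanning straight away, using the extended scan enable command
 * when the controller supports extended scanning.
 */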
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302209 if (use_ext_scan(hdev)) {
2210 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2211
2212 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2213 ext_enable_cp.enable = LE_SCAN_ENABLE;
2214 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2215
2216 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2217 sizeof(ext_enable_cp), &ext_enable_cp);
2218 } else {
2219 struct hci_cp_le_set_scan_enable cp;
2220
2221 memset(&cp, 0, sizeof(cp));
2222 cp.enable = LE_SCAN_ENABLE;
2223 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2224 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2225 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002226
2227 return 0;
2228}
2229
2230static void le_scan_restart_work(struct work_struct *work)
2231{
2232 struct hci_dev *hdev = container_of(work, struct hci_dev,
2233 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002234 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002235 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002236
Howard Chung22fbcfc2020-11-11 15:02:19 +08002237 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002238
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002239 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002240 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002241 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2242 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002243 return;
2244 }
2245
2246 hci_dev_lock(hdev);
2247
2248 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2249 !hdev->discovery.scan_start)
2250 goto unlock;
2251
2252 /* When the scan was started, hdev->le_scan_disable was queued to
2253 * run 'duration' after scan_start. That work was canceled while
2254 * restarting the scan, so queue it again with the remaining
2255 * timeout to make sure the scan does not run indefinitely.
2256 */
2257 duration = hdev->discovery.scan_duration;
2258 scan_start = hdev->discovery.scan_start;
2259 now = jiffies;
2260 if (now - scan_start <= duration) {
2261 int elapsed;
2262
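/* jiffies may have wrapped since the scan started, so compute the elapsed
 * time accordingly.
 */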
2263 if (now >= scan_start)
2264 elapsed = now - scan_start;
2265 else
2266 elapsed = ULONG_MAX - scan_start + now;
2267
2268 timeout = duration - elapsed;
2269 } else {
2270 timeout = 0;
2271 }
2272
2273 queue_delayed_work(hdev->req_workqueue,
2274 &hdev->le_scan_disable, timeout);
2275
2276unlock:
2277 hci_dev_unlock(hdev);
2278}
2279
Johan Hedberge68f0722015-11-11 08:30:30 +02002280static int active_scan(struct hci_request *req, unsigned long opt)
2281{
2282 uint16_t interval = opt;
2283 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002284 u8 own_addr_type;
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002285 /* Accept list is not used for discovery */
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002286 u8 filter_policy = 0x00;
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002287 /* Default is to enable duplicates filter */
2288 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302289 /* Discovery doesn't require controller address resolution */
2290 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02002291 int err;
2292
Howard Chung22fbcfc2020-11-11 15:02:19 +08002293 bt_dev_dbg(hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002294
Johan Hedberge68f0722015-11-11 08:30:30 +02002295 /* If controller is scanning, it means the background scanning is
2296 * running. Thus, we should temporarily stop it in order to set the
2297 * discovery scanning parameters.
2298 */
Howard Chung422bb172020-11-26 12:22:23 +08002299 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302300 hci_req_add_le_scan_disable(req, false);
Howard Chung422bb172020-11-26 12:22:23 +08002301 cancel_interleave_scan(hdev);
2302 }
Johan Hedberge68f0722015-11-11 08:30:30 +02002303
2304 /* All active scans will be done with either a resolvable private
2305 * address (when the privacy feature has been enabled) or a
2306 * non-resolvable private address.
2307 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002308 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2309 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002310 if (err < 0)
2311 own_addr_type = ADDR_LE_DEV_PUBLIC;
2312
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002313 if (hci_is_adv_monitoring(hdev)) {
2314 /* Duplicate filter should be disabled when some advertisement
2315 * monitor is activated, otherwise the monitor can only receive
2316 * one advertisement per peer during active scanning, and
2317 * might report loss to these peers.
2318 *
2319 * Note that different controllers have different meanings of
2320 * |duplicate|. Some of them consider packets with the same
2321 * address as duplicate, and others consider packets with the
2322 * same address and the same RSSI as duplicate. Although in the
2323 * latter case we don't strictly need to disable the duplicate
2324 * filter, active scanning typically runs only for a short time,
2325 * so the power impact should be negligible.
2326 */
2327 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2328 }
2329
Alain Michaudd4edda02020-06-29 17:04:15 +00002330 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2331 hdev->le_scan_window_discovery, own_addr_type,
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002332 filter_policy, filter_dup, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02002333 return 0;
2334}
2335
2336static int interleaved_discov(struct hci_request *req, unsigned long opt)
2337{
2338 int err;
2339
Howard Chung22fbcfc2020-11-11 15:02:19 +08002340 bt_dev_dbg(req->hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002341
2342 err = active_scan(req, opt);
2343 if (err)
2344 return err;
2345
Johan Hedberg7df26b52015-11-11 12:24:21 +02002346 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002347}
2348
2349static void start_discovery(struct hci_dev *hdev, u8 *status)
2350{
2351 unsigned long timeout;
2352
Howard Chung22fbcfc2020-11-11 15:02:19 +08002353 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002354
2355 switch (hdev->discovery.type) {
2356 case DISCOV_TYPE_BREDR:
2357 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002358 hci_req_sync(hdev, bredr_inquiry,
2359 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002360 status);
2361 return;
2362 case DISCOV_TYPE_INTERLEAVED:
2363 /* When running simultaneous discovery, the LE scanning time
2364 * should occupy the whole discovery time since BR/EDR inquiry
2365 * and LE scanning are scheduled by the controller.
2366 *
2367 * For interleaving discovery in comparison, BR/EDR inquiry
2368 * and LE scanning are done sequentially with separate
2369 * timeouts.
2370 */
2371 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2372 &hdev->quirks)) {
2373 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2374 /* During simultaneous discovery, we double LE scan
2375 * interval. We must leave some time for the controller
2376 * to do BR/EDR inquiry.
2377 */
2378 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00002379 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002380 status);
2381 break;
2382 }
2383
2384 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00002385 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002386 HCI_CMD_TIMEOUT, status);
2387 break;
2388 case DISCOV_TYPE_LE:
2389 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00002390 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002391 HCI_CMD_TIMEOUT, status);
2392 break;
2393 default:
2394 *status = HCI_ERROR_UNSPECIFIED;
2395 return;
2396 }
2397
2398 if (*status)
2399 return;
2400
Howard Chung22fbcfc2020-11-11 15:02:19 +08002401 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
Johan Hedberge68f0722015-11-11 08:30:30 +02002402
2403 /* When service discovery is used and the controller has a
2404 * strict duplicate filter, it is important to remember the
2405 * start and duration of the scan. This is required for
2406 * restarting scanning during the discovery phase.
2407 */
2408 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2409 hdev->discovery.result_filtering) {
2410 hdev->discovery.scan_start = jiffies;
2411 hdev->discovery.scan_duration = timeout;
2412 }
2413
2414 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2415 timeout);
2416}
2417
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002418bool hci_req_stop_discovery(struct hci_request *req)
2419{
2420 struct hci_dev *hdev = req->hdev;
2421 struct discovery_state *d = &hdev->discovery;
2422 struct hci_cp_remote_name_req_cancel cp;
2423 struct inquiry_entry *e;
2424 bool ret = false;
2425
Howard Chung22fbcfc2020-11-11 15:02:19 +08002426 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002427
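/* Active discovery: cancel the inquiry and/or the LE scan that was started
 * for it. Passive scanning is handled separately below.
 */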
2428 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2429 if (test_bit(HCI_INQUIRY, &hdev->flags))
2430 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2431
2432 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2433 cancel_delayed_work(&hdev->le_scan_disable);
Sonny Sasakac06632a2021-03-15 10:30:59 -07002434 cancel_delayed_work(&hdev->le_scan_restart);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302435 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002436 }
2437
2438 ret = true;
2439 } else {
2440 /* Passive scanning */
2441 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302442 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002443 ret = true;
2444 }
2445 }
2446
2447 /* No further actions needed for LE-only discovery */
2448 if (d->type == DISCOV_TYPE_LE)
2449 return ret;
2450
2451 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2452 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2453 NAME_PENDING);
2454 if (!e)
2455 return ret;
2456
2457 bacpy(&cp.bdaddr, &e->data.bdaddr);
2458 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2459 &cp);
2460 ret = true;
2461 }
2462
2463 return ret;
2464}
2465
Kiran K9798fbd2021-09-07 15:42:44 +05302466static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2467 u16 opcode)
2468{
2469 bt_dev_dbg(hdev, "status %u", status);
2470}
2471
2472int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2473{
2474 struct hci_request req;
2475 int err;
2476 __u8 vnd_len, *vnd_data = NULL;
2477 struct hci_op_configure_data_path *cmd = NULL;
2478
2479 hci_req_init(&req, hdev);
2480
2481 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2482 &vnd_data);
2483 if (err < 0)
2484 goto error;
2485
2486 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2487 if (!cmd) {
2488 err = -ENOMEM;
2489 goto error;
2490 }
2491
2492 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2493 if (err < 0)
2494 goto error;
2495
2496 cmd->vnd_len = vnd_len;
2497 memcpy(cmd->vnd_data, vnd_data, vnd_len);
2498
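/* Queue the command once per direction (0x00 = input, 0x01 = output) so
 * both data paths are configured with the same vendor data.
 */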
2499 cmd->direction = 0x00;
2500 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2501
2502 cmd->direction = 0x01;
2503 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2504
2505 err = hci_req_run(&req, config_data_path_complete);
2506error:
2507
2508 kfree(cmd);
2509 kfree(vnd_data);
2510 return err;
2511}
2512
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002513static int stop_discovery(struct hci_request *req, unsigned long opt)
2514{
2515 hci_dev_lock(req->hdev);
2516 hci_req_stop_discovery(req);
2517 hci_dev_unlock(req->hdev);
2518
2519 return 0;
2520}
2521
Johan Hedberge68f0722015-11-11 08:30:30 +02002522static void discov_update(struct work_struct *work)
2523{
2524 struct hci_dev *hdev = container_of(work, struct hci_dev,
2525 discov_update);
2526 u8 status = 0;
2527
2528 switch (hdev->discovery.state) {
2529 case DISCOVERY_STARTING:
2530 start_discovery(hdev, &status);
2531 mgmt_start_discovery_complete(hdev, status);
2532 if (status)
2533 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2534 else
2535 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2536 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002537 case DISCOVERY_STOPPING:
2538 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2539 mgmt_stop_discovery_complete(hdev, status);
2540 if (!status)
2541 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2542 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002543 case DISCOVERY_STOPPED:
2544 default:
2545 return;
2546 }
2547}
2548
Johan Hedbergc366f552015-11-23 15:43:06 +02002549static void discov_off(struct work_struct *work)
2550{
2551 struct hci_dev *hdev = container_of(work, struct hci_dev,
2552 discov_off.work);
2553
Howard Chung22fbcfc2020-11-11 15:02:19 +08002554 bt_dev_dbg(hdev, "");
Johan Hedbergc366f552015-11-23 15:43:06 +02002555
2556 hci_dev_lock(hdev);
2557
2558 /* When the discoverable timeout triggers, just make sure
2559 * the limited discoverable flag is cleared. Even in the case
2560 * of a timeout triggered from general discoverable, it is
2561 * safe to unconditionally clear the flag.
2562 */
2563 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2564 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2565 hdev->discov_timeout = 0;
2566
2567 hci_dev_unlock(hdev);
2568
2569 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2570 mgmt_new_settings(hdev);
2571}
2572
Johan Hedberg2ff13892015-11-25 16:15:44 +02002573static int powered_update_hci(struct hci_request *req, unsigned long opt)
2574{
2575 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002576 u8 link_sec;
2577
2578 hci_dev_lock(hdev);
2579
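/* Write the host's SSP (and Secure Connections) support to the controller
 * if the host features do not reflect it yet.
 */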
2580 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2581 !lmp_host_ssp_capable(hdev)) {
2582 u8 mode = 0x01;
2583
2584 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2585
2586 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2587 u8 support = 0x01;
2588
2589 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2590 sizeof(support), &support);
2591 }
2592 }
2593
2594 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2595 lmp_bredr_capable(hdev)) {
2596 struct hci_cp_write_le_host_supported cp;
2597
2598 cp.le = 0x01;
2599 cp.simul = 0x00;
2600
2601 /* Check first if we already have the right
2602 * host state (host features set)
2603 */
2604 if (cp.le != lmp_host_le_capable(hdev) ||
2605 cp.simul != lmp_host_le_br_capable(hdev))
2606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2607 sizeof(cp), &cp);
2608 }
2609
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002610 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002611 /* Make sure the controller has a good default for
2612 * advertising data. This also applies to the case
2613 * where BR/EDR was toggled during the AUTO_OFF phase.
2614 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002615 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2616 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302617 int err;
2618
2619 if (ext_adv_capable(hdev)) {
2620 err = __hci_req_setup_ext_adv_instance(req,
2621 0x00);
2622 if (!err)
2623 __hci_req_update_scan_rsp_data(req,
2624 0x00);
2625 } else {
2626 err = 0;
2627 __hci_req_update_adv_data(req, 0x00);
2628 __hci_req_update_scan_rsp_data(req, 0x00);
2629 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002630
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302631 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302632 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302633 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302634 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002635 __hci_req_enable_ext_advertising(req,
2636 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302637 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002638 } else if (!list_empty(&hdev->adv_instances)) {
2639 struct adv_info *adv_instance;
2640
Johan Hedberg2ff13892015-11-25 16:15:44 +02002641 adv_instance = list_first_entry(&hdev->adv_instances,
2642 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002643 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002644 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002645 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002646 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002647 }
2648
2649 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2650 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2651 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2652 sizeof(link_sec), &link_sec);
2653
2654 if (lmp_bredr_capable(hdev)) {
2655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2656 __hci_req_write_fast_connectable(req, true);
2657 else
2658 __hci_req_write_fast_connectable(req, false);
2659 __hci_req_update_scan(req);
2660 __hci_req_update_class(req);
2661 __hci_req_update_name(req);
2662 __hci_req_update_eir(req);
2663 }
2664
2665 hci_dev_unlock(hdev);
2666 return 0;
2667}
2668
2669int __hci_req_hci_power_on(struct hci_dev *hdev)
2670{
2671 /* Register the available SMP channels (BR/EDR and LE) only when
2672 * successfully powering on the controller. This late
2673 * registration is required so that LE SMP can clearly decide
2674 * whether the public address or the static address is used.
2675 */
2676 smp_register(hdev);
2677
2678 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2679 NULL);
2680}
2681
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002682void hci_request_setup(struct hci_dev *hdev)
2683{
Johan Hedberge68f0722015-11-11 08:30:30 +02002684 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002685 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002686 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002687 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2688 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002689 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Howard Chungc4f1f402020-11-26 12:22:21 +08002690 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002691}
2692
2693void hci_request_cancel_all(struct hci_dev *hdev)
2694{
Benjamin Berg744451c2021-12-17 16:28:09 +01002695 __hci_cmd_sync_cancel(hdev, ENODEV);
Johan Hedberg7df0f732015-11-12 15:15:00 +02002696
Johan Hedberge68f0722015-11-11 08:30:30 +02002697 cancel_work_sync(&hdev->discov_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002698 cancel_work_sync(&hdev->scan_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002699 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002700 cancel_delayed_work_sync(&hdev->le_scan_disable);
2701 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002702
2703 if (hdev->adv_instance_timeout) {
2704 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2705 hdev->adv_instance_timeout = 0;
2706 }
Howard Chungc4f1f402020-11-26 12:22:21 +08002707
2708 cancel_interleave_scan(hdev);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002709}