/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

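/* Prepare a request for building: start with an empty command queue and
 * no error recorded.
 */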
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

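/* Drop any commands that were queued on the request but never submitted. */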
void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

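/* Splice the request's queued commands onto the device command queue and
 * schedule the command work. The completion callback is attached to the
 * last command so it runs once the whole request has been processed.
 */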
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

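/* Completion handler used by synchronous requests: store the result and
 * wake up the waiter in __hci_req_sync().
 */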
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * check against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

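/* Allocate an skb containing a single HCI command packet (header plus
 * optional parameters).
 */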
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

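/* Queue page scan activity/type updates. Fast connectable mode uses
 * interlaced scanning with a 160 msec interval, otherwise the default
 * page scan parameters are restored.
 */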
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

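/* Helpers for alternating between allowlist and unfiltered scanning while
 * advertisement monitors are active.
 */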
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was newly started by this function,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

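/* Queue a Write Local Name command carrying the current device name. */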
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

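/* Queue commands to stop LE scanning and, unless an RPA based LE connection
 * is being created, also disable controller based address resolution.
 */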
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

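/* Queue removal of a device from the accept list and, when LL privacy is in
 * use, from the resolving list as well.
 */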
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

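/* Rebuild the controller accept list from the pending connection and report
 * lists. Returns the scan filter policy: 0x01 when the accept list can be
 * used, 0x00 otherwise.
 */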
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

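/* Queue scan parameter and scan enable commands, using the extended variants
 * when the controller supports extended scanning.
 */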
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

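/* Clear a pending advertising instance timeout. */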
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

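/* Consult the supported LE states to decide whether advertising is allowed
 * while LE connections exist in peripheral or central role.
 */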
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

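/* Queue legacy advertising parameter and enable commands based on the current
 * advertising instance and the connectable setting.
 */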
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		adv_min_interval = adv->min_interval;
		adv_max_interval = adv->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (adv_cur_instance_is_scannable(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

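/* Queue an updated scan response payload for the given instance, skipping the
 * write if the data has not changed.
 */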
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_scan_rsp(hdev, instance, pdu.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_scan_rsp(hdev, instance, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

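/* Queue updated advertising data for the given instance, skipping the write
 * if the data has not changed.
 */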
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_adv_data(hdev, instance, pdu.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

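/* Work callback that removes an advertising instance once its timeout has
 * expired.
 */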
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

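/* One round of interleave scanning: restart passive scanning and flip between
 * the allowlist and no-filter states.
 */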
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

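/* Select the own address type (and random address, if one is needed) for
 * advertising based on the current privacy settings.
 */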
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03.
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

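/* Queue a Set Random Address command unless advertising or a pending LE
 * connection makes it unsafe to change the address right now.
 */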
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

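/* Build the LE Set Extended Advertising Parameters command for the given
 * advertising instance (instance 0 uses the device defaults).
 */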
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301332int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301333{
1334 struct hci_cp_le_set_ext_adv_params cp;
1335 struct hci_dev *hdev = req->hdev;
1336 bool connectable;
1337 u32 flags;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301338 bdaddr_t random_addr;
1339 u8 own_addr_type;
1340 int err;
1341 struct adv_info *adv_instance;
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301342 bool secondary_adv;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301343
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301344 if (instance > 0) {
1345 adv_instance = hci_find_adv_instance(hdev, instance);
1346 if (!adv_instance)
1347 return -EINVAL;
1348 } else {
1349 adv_instance = NULL;
1350 }
1351
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001352 flags = hci_adv_instance_flags(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301353
1354 /* If the "connectable" instance flag was not set, then choose between
1355 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1356 */
1357 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1358 mgmt_get_connectable(hdev);
1359
Colin Ian King75edd1f2018-11-09 13:27:36 +00001360 if (!is_advertising_allowed(hdev, connectable))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301361 return -EPERM;
1362
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301363 /* Set require_privacy to true only when non-connectable
1364 * advertising is used. In that case it is fine to use a
1365 * non-resolvable private address.
1366 */
1367 err = hci_get_random_address(hdev, !connectable,
1368 adv_use_rpa(hdev, flags), adv_instance,
1369 &own_addr_type, &random_addr);
1370 if (err < 0)
1371 return err;
1372
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301373 memset(&cp, 0, sizeof(cp));
1374
Daniel Winkler9bf9f4b2020-12-03 12:12:50 -08001375 if (adv_instance) {
1376 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1377 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1378 cp.tx_power = adv_instance->tx_power;
1379 } else {
1380 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1381 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1382 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1383 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301384
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301385 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1386
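	/* Summary of the selection below: connectable instances use
	 * (extended or legacy) connectable advertising, instances that are
	 * scannable or carry scan response data use scannable advertising,
	 * and everything else falls back to non-connectable advertising.
	 * Whether the extended or legacy PDU variant is picked depends on
	 * whether a secondary PHY was requested via the SEC flags.
	 */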
1387 if (connectable) {
1388 if (secondary_adv)
1389 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1390 else
1391 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
Luiz Augusto von Dentz01ce70b2021-09-20 15:59:37 -07001392 } else if (hci_adv_instance_is_scannable(hdev, instance) ||
Daniel Winklerff02db12021-03-03 11:15:23 -08001393 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301394 if (secondary_adv)
1395 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1396 else
1397 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1398 } else {
1399 if (secondary_adv)
1400 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1401 else
1402 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1403 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301404
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301405 cp.own_addr_type = own_addr_type;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301406 cp.channel_map = hdev->le_adv_channel_map;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001407 cp.handle = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301408
Jaganath Kanakkassery85a721a2018-07-19 17:09:47 +05301409 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1410 cp.primary_phy = HCI_ADV_PHY_1M;
1411 cp.secondary_phy = HCI_ADV_PHY_2M;
1412 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1413 cp.primary_phy = HCI_ADV_PHY_CODED;
1414 cp.secondary_phy = HCI_ADV_PHY_CODED;
1415 } else {
1416 /* In all other cases use 1M */
1417 cp.primary_phy = HCI_ADV_PHY_1M;
1418 cp.secondary_phy = HCI_ADV_PHY_1M;
1419 }
1420
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301421 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1422
Luiz Augusto von Dentzcf75ad82021-10-27 16:58:44 -07001423 if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1424 own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301425 bacmp(&random_addr, BDADDR_ANY)) {
1426 struct hci_cp_le_set_adv_set_rand_addr cp;
1427
 1428		/* Check if the random address needs to be updated */
1429 if (adv_instance) {
1430 if (!bacmp(&random_addr, &adv_instance->random_addr))
1431 return 0;
1432 } else {
1433 if (!bacmp(&random_addr, &hdev->random_addr))
1434 return 0;
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001435			/* Instance 0x00 doesn't have an adv_info; instead it
 1436			 * uses hdev->random_addr to track its address, so
 1437			 * whenever that address needs to be updated the random
 1438			 * address is also set here, since hdev->random_addr is
 1439			 * shared with the scan state machine.
 1440			 */
1441 set_random_addr(req, &random_addr);
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301442 }
1443
1444 memset(&cp, 0, sizeof(cp));
1445
Tedd Ho-Jeong Aneaa7b722020-05-01 10:00:50 -07001446 cp.handle = instance;
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +05301447 bacpy(&cp.bdaddr, &random_addr);
1448
1449 hci_req_add(req,
1450 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1451 sizeof(cp), &cp);
1452 }
1453
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301454 return 0;
1455}
1456
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001457int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301458{
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001459 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301460 struct hci_cp_le_set_ext_adv_enable *cp;
1461 struct hci_cp_ext_adv_set *adv_set;
1462 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001463 struct adv_info *adv_instance;
1464
1465 if (instance > 0) {
1466 adv_instance = hci_find_adv_instance(hdev, instance);
1467 if (!adv_instance)
1468 return -EINVAL;
1469 } else {
1470 adv_instance = NULL;
1471 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301472
1473 cp = (void *) data;
1474 adv_set = (void *) cp->data;
1475
1476 memset(cp, 0, sizeof(*cp));
1477
1478 cp->enable = 0x01;
1479 cp->num_of_sets = 0x01;
1480
1481 memset(adv_set, 0, sizeof(*adv_set));
1482
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001483 adv_set->handle = instance;
1484
1485 /* Set duration per instance since controller is responsible for
1486 * scheduling it.
1487 */
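	/* For example, an instance timeout of 5 seconds gives a duration of
	 * 5000 ms below, which is encoded as 500 since the controller
	 * interprets the field as N * 10 ms.
	 */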
1488 if (adv_instance && adv_instance->duration) {
Luiz Augusto von Dentz10bbffa2019-10-24 16:15:42 +03001489 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001490
1491 /* Time = N * 10 ms */
1492 adv_set->duration = cpu_to_le16(duration / 10);
1493 }
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301494
1495 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1496 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1497 data);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001498
1499 return 0;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301500}
1501
Daniel Winkler37adf702020-07-14 14:16:00 -07001502int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1503{
1504 struct hci_dev *hdev = req->hdev;
1505 struct hci_cp_le_set_ext_adv_enable *cp;
1506 struct hci_cp_ext_adv_set *adv_set;
1507 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1508 u8 req_size;
1509
1510 /* If request specifies an instance that doesn't exist, fail */
1511 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1512 return -EINVAL;
1513
1514 memset(data, 0, sizeof(data));
1515
1516 cp = (void *)data;
1517 adv_set = (void *)cp->data;
1518
1519 /* Instance 0x00 indicates all advertising instances will be disabled */
1520 cp->num_of_sets = !!instance;
1521 cp->enable = 0x00;
1522
1523 adv_set->handle = instance;
1524
1525 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1526 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1527
1528 return 0;
1529}
1530
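/* Usage note (illustrative, not from the original file): passing instance
 * 0x00 sends the enable command with num_of_sets == 0, which the controller
 * treats as "disable all advertising sets"; a non-zero instance disables only
 * that set. For example:
 *
 *	__hci_req_disable_ext_adv_instance(req, 0x00);	// disable everything
 *	__hci_req_disable_ext_adv_instance(req, 0x01);	// disable set 1 only
 */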
1531int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1532{
1533 struct hci_dev *hdev = req->hdev;
1534
1535 /* If request specifies an instance that doesn't exist, fail */
1536 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1537 return -EINVAL;
1538
1539 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1540
1541 return 0;
1542}
1543
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301544int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1545{
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301546 struct hci_dev *hdev = req->hdev;
Daniel Winkler37adf702020-07-14 14:16:00 -07001547 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301548 int err;
1549
Daniel Winkler37adf702020-07-14 14:16:00 -07001550	/* If the instance isn't pending, the controller already knows about
 1551	 * it, and it's safe to disable it first.
 1552	 */
1553 if (adv_instance && !adv_instance->pending)
1554 __hci_req_disable_ext_adv_instance(req, instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301555
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301556 err = __hci_req_setup_ext_adv_instance(req, instance);
1557 if (err < 0)
1558 return err;
1559
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301560 __hci_req_update_scan_rsp_data(req, instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001561 __hci_req_enable_ext_advertising(req, instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301562
1563 return 0;
1564}
1565
Johan Hedbergf2252572015-11-18 12:49:20 +02001566int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1567 bool force)
1568{
1569 struct hci_dev *hdev = req->hdev;
1570 struct adv_info *adv_instance = NULL;
1571 u16 timeout;
1572
1573 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001574 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001575 return -EPERM;
1576
1577 if (hdev->adv_instance_timeout)
1578 return -EBUSY;
1579
1580 adv_instance = hci_find_adv_instance(hdev, instance);
1581 if (!adv_instance)
1582 return -ENOENT;
1583
1584 /* A zero timeout means unlimited advertising. As long as there is
 1585	 * only one instance, the duration should be ignored. We still set a
 1586	 * timeout in case further instances are added later on.
1587 *
1588 * If the remaining lifetime of the instance is more than the duration
1589 * then the timeout corresponds to the duration, otherwise it will be
1590 * reduced to the remaining instance lifetime.
1591 */
1592 if (adv_instance->timeout == 0 ||
1593 adv_instance->duration <= adv_instance->remaining_time)
1594 timeout = adv_instance->duration;
1595 else
1596 timeout = adv_instance->remaining_time;
1597
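	/* For example (illustrative numbers): with a duration of 10 s, a
	 * timeout of 30 s and a remaining_time of 25 s, the instance is
	 * scheduled for 10 s and remaining_time drops to 15 s below; once
	 * remaining_time falls under the duration, the shorter remaining_time
	 * is used instead.
	 */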
1598 /* The remaining time is being reduced unless the instance is being
1599 * advertised without time limit.
1600 */
1601 if (adv_instance->timeout)
1602 adv_instance->remaining_time =
1603 adv_instance->remaining_time - timeout;
1604
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001605	/* Only use delayed work for scheduling instances with legacy advertising */
1606 if (!ext_adv_capable(hdev)) {
1607 hdev->adv_instance_timeout = timeout;
1608 queue_delayed_work(hdev->req_workqueue,
Johan Hedbergf2252572015-11-18 12:49:20 +02001609 &hdev->adv_instance_expire,
1610 msecs_to_jiffies(timeout * 1000));
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03001611 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001612
1613 /* If we're just re-scheduling the same instance again then do not
1614 * execute any HCI commands. This happens when a single instance is
1615 * being advertised.
1616 */
1617 if (!force && hdev->cur_adv_instance == instance &&
1618 hci_dev_test_flag(hdev, HCI_LE_ADV))
1619 return 0;
1620
1621 hdev->cur_adv_instance = instance;
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301622 if (ext_adv_capable(hdev)) {
1623 __hci_req_start_ext_adv(req, instance);
1624 } else {
1625 __hci_req_update_adv_data(req, instance);
1626 __hci_req_update_scan_rsp_data(req, instance);
1627 __hci_req_enable_advertising(req);
1628 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001629
1630 return 0;
1631}
1632
Johan Hedbergf2252572015-11-18 12:49:20 +02001633/* For a single instance:
1634 * - force == true: The instance will be removed even when its remaining
1635 * lifetime is not zero.
1636 * - force == false: the instance will be deactivated but kept stored unless
1637 * the remaining lifetime is zero.
1638 *
1639 * For instance == 0x00:
1640 * - force == true: All instances will be removed regardless of their timeout
1641 * setting.
1642 * - force == false: Only instances that have a timeout will be removed.
1643 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001644void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1645 struct hci_request *req, u8 instance,
1646 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02001647{
1648 struct adv_info *adv_instance, *n, *next_instance = NULL;
1649 int err;
1650 u8 rem_inst;
1651
1652 /* Cancel any timeout concerning the removed instance(s). */
1653 if (!instance || hdev->cur_adv_instance == instance)
1654 cancel_adv_timeout(hdev);
1655
1656 /* Get the next instance to advertise BEFORE we remove
1657 * the current one. This can be the same instance again
1658 * if there is only one instance.
1659 */
1660 if (instance && hdev->cur_adv_instance == instance)
1661 next_instance = hci_get_next_instance(hdev, instance);
1662
1663 if (instance == 0x00) {
1664 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1665 list) {
1666 if (!(force || adv_instance->timeout))
1667 continue;
1668
1669 rem_inst = adv_instance->instance;
1670 err = hci_remove_adv_instance(hdev, rem_inst);
1671 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001672 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02001673 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001674 } else {
1675 adv_instance = hci_find_adv_instance(hdev, instance);
1676
1677 if (force || (adv_instance && adv_instance->timeout &&
1678 !adv_instance->remaining_time)) {
1679 /* Don't advertise a removed instance. */
1680 if (next_instance &&
1681 next_instance->instance == instance)
1682 next_instance = NULL;
1683
1684 err = hci_remove_adv_instance(hdev, instance);
1685 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001686 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001687 }
1688 }
1689
Johan Hedbergf2252572015-11-18 12:49:20 +02001690 if (!req || !hdev_is_powered(hdev) ||
1691 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1692 return;
1693
Daniel Winkler37adf702020-07-14 14:16:00 -07001694 if (next_instance && !ext_adv_capable(hdev))
Johan Hedbergf2252572015-11-18 12:49:20 +02001695 __hci_req_schedule_adv_instance(req, next_instance->instance,
1696 false);
1697}
1698
Johan Hedberg0857dd32014-12-19 13:40:20 +02001699int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001700 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001701{
1702 struct hci_dev *hdev = req->hdev;
1703 int err;
1704
 1705	/* If privacy is enabled, use a resolvable private address. If the
 1706	 * current RPA has expired or something other than the current RPA
 1707	 * is in use, generate a new one.
 1708	 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001709 if (use_rpa) {
Sathish Narasimmand03c7592020-07-23 18:09:00 +05301710		/* If the controller supports LL Privacy, use own address
 1711		 * type 0x03 (resolvable private address).
 1712		 */
Luiz Augusto von Dentzad383c22021-10-27 16:58:42 -07001713 if (use_ll_privacy(hdev))
Sathish Narasimmand03c7592020-07-23 18:09:00 +05301714 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1715 else
1716 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg0857dd32014-12-19 13:40:20 +02001717
Luiz Augusto von Dentzc45074d2021-08-02 16:56:19 -07001718 if (rpa_valid(hdev))
Johan Hedberg0857dd32014-12-19 13:40:20 +02001719 return 0;
1720
1721 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1722 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001723 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001724 return err;
1725 }
1726
1727 set_random_addr(req, &hdev->rpa);
1728
Johan Hedberg0857dd32014-12-19 13:40:20 +02001729 return 0;
1730 }
1731
1732 /* In case of required privacy without resolvable private address,
 1733	 * use a non-resolvable private address. This is useful for active
1734 * scanning and non-connectable advertising.
1735 */
1736 if (require_privacy) {
1737 bdaddr_t nrpa;
1738
1739 while (true) {
1740 /* The non-resolvable private address is generated
1741 * from random six bytes with the two most significant
1742 * bits cleared.
1743 */
1744 get_random_bytes(&nrpa, 6);
1745 nrpa.b[5] &= 0x3f;
1746
1747 /* The non-resolvable private address shall not be
1748 * equal to the public address.
1749 */
1750 if (bacmp(&hdev->bdaddr, &nrpa))
1751 break;
1752 }
1753
1754 *own_addr_type = ADDR_LE_DEV_RANDOM;
1755 set_random_addr(req, &nrpa);
1756 return 0;
1757 }
1758
 1759	/* If forcing static address is in use or there is no public
 1760	 * address, use the static address as the random address (but skip
 1761	 * the HCI command if the current random address is already the
 1762	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001763 *
1764 * In case BR/EDR has been disabled on a dual-mode controller
1765 * and a static address has been configured, then use that
1766 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001767 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001768 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001769 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001770 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001771 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001772 *own_addr_type = ADDR_LE_DEV_RANDOM;
1773 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1774 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1775 &hdev->static_addr);
1776 return 0;
1777 }
1778
1779 /* Neither privacy nor static address is being used so use a
1780 * public address.
1781 */
1782 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1783
1784 return 0;
1785}
Johan Hedberg2cf22212014-12-19 22:26:00 +02001786
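/* Illustrative sketch (not part of the original file): callers that build
 * scanning or connection requests use this helper roughly as active_scan()
 * does further down, e.g.:
 *
 *	u8 own_addr_type;
 *
 *	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
 *					&own_addr_type);
 *	if (err < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 *
 * i.e. on failure the code simply falls back to the public address.
 */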
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08001787static bool disconnected_accept_list_entries(struct hci_dev *hdev)
Johan Hedberg405a2612014-12-19 23:18:22 +02001788{
1789 struct bdaddr_list *b;
1790
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08001791 list_for_each_entry(b, &hdev->accept_list, list) {
Johan Hedberg405a2612014-12-19 23:18:22 +02001792 struct hci_conn *conn;
1793
1794 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1795 if (!conn)
1796 return true;
1797
1798 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1799 return true;
1800 }
1801
1802 return false;
1803}
1804
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001805void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001806{
1807 struct hci_dev *hdev = req->hdev;
1808 u8 scan;
1809
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001810 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001811 return;
1812
1813 if (!hdev_is_powered(hdev))
1814 return;
1815
1816 if (mgmt_powering_down(hdev))
1817 return;
1818
Abhishek Pandit-Subedi4f40afc2020-03-11 08:54:01 -07001819 if (hdev->scanning_paused)
1820 return;
1821
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001822 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08001823 disconnected_accept_list_entries(hdev))
Johan Hedberg405a2612014-12-19 23:18:22 +02001824 scan = SCAN_PAGE;
1825 else
1826 scan = SCAN_DISABLED;
1827
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001828 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001829 scan |= SCAN_INQUIRY;
1830
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001831 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1832 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1833 return;
1834
Johan Hedberg405a2612014-12-19 23:18:22 +02001835 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1836}
1837
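/* Illustrative note (not from the original file): the scan value built in
 * __hci_req_update_scan() above is a bitmask, e.g. a powered device that is
 * both connectable and discoverable writes SCAN_PAGE | SCAN_INQUIRY, while
 * one that is neither writes SCAN_DISABLED.
 */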
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001838static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001839{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001840 hci_dev_lock(req->hdev);
1841 __hci_req_update_scan(req);
1842 hci_dev_unlock(req->hdev);
1843 return 0;
1844}
Johan Hedberg405a2612014-12-19 23:18:22 +02001845
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001846static void scan_update_work(struct work_struct *work)
1847{
1848 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1849
1850 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001851}
1852
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001853static u8 get_service_classes(struct hci_dev *hdev)
1854{
1855 struct bt_uuid *uuid;
1856 u8 val = 0;
1857
1858 list_for_each_entry(uuid, &hdev->uuids, list)
1859 val |= uuid->svc_hint;
1860
1861 return val;
1862}
1863
1864void __hci_req_update_class(struct hci_request *req)
1865{
1866 struct hci_dev *hdev = req->hdev;
1867 u8 cod[3];
1868
Howard Chung22fbcfc2020-11-11 15:02:19 +08001869 bt_dev_dbg(hdev, "");
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001870
1871 if (!hdev_is_powered(hdev))
1872 return;
1873
1874 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1875 return;
1876
1877 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1878 return;
1879
1880 cod[0] = hdev->minor_class;
1881 cod[1] = hdev->major_class;
1882 cod[2] = get_service_classes(hdev);
1883
1884 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1885 cod[1] |= 0x20;
1886
1887 if (memcmp(cod, hdev->dev_class, 3) == 0)
1888 return;
1889
1890 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1891}
1892
Johan Hedbergaed1a882015-11-22 17:24:44 +03001893static void write_iac(struct hci_request *req)
1894{
1895 struct hci_dev *hdev = req->hdev;
1896 struct hci_cp_write_current_iac_lap cp;
1897
1898 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1899 return;
1900
1901 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1902 /* Limited discoverable mode */
1903 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1904 cp.iac_lap[0] = 0x00; /* LIAC */
1905 cp.iac_lap[1] = 0x8b;
1906 cp.iac_lap[2] = 0x9e;
1907 cp.iac_lap[3] = 0x33; /* GIAC */
1908 cp.iac_lap[4] = 0x8b;
1909 cp.iac_lap[5] = 0x9e;
1910 } else {
1911 /* General discoverable mode */
1912 cp.num_iac = 1;
1913 cp.iac_lap[0] = 0x33; /* GIAC */
1914 cp.iac_lap[1] = 0x8b;
1915 cp.iac_lap[2] = 0x9e;
1916 }
1917
1918 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1919 (cp.num_iac * 3) + 1, &cp);
1920}
1921
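/* Note (not from the original file): the iac_lap bytes above are the inquiry
 * access codes in little-endian order, i.e. 0x9e8b00 for the Limited Inquiry
 * Access Code (LIAC) and 0x9e8b33 for the General Inquiry Access Code (GIAC).
 */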
1922static int discoverable_update(struct hci_request *req, unsigned long opt)
1923{
1924 struct hci_dev *hdev = req->hdev;
1925
1926 hci_dev_lock(hdev);
1927
1928 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1929 write_iac(req);
1930 __hci_req_update_scan(req);
1931 __hci_req_update_class(req);
1932 }
1933
1934 /* Advertising instances don't use the global discoverable setting, so
1935 * only update AD if advertising was enabled using Set Advertising.
1936 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001937 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001938 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001939
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001940 /* Discoverable mode affects the local advertising
1941 * address in limited privacy mode.
1942 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301943 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
1944 if (ext_adv_capable(hdev))
1945 __hci_req_start_ext_adv(req, 0x00);
1946 else
1947 __hci_req_enable_advertising(req);
1948 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001949 }
1950
Johan Hedbergaed1a882015-11-22 17:24:44 +03001951 hci_dev_unlock(hdev);
1952
1953 return 0;
1954}
1955
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001956void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1957 u8 reason)
1958{
1959 switch (conn->state) {
1960 case BT_CONNECTED:
1961 case BT_CONFIG:
1962 if (conn->type == AMP_LINK) {
1963 struct hci_cp_disconn_phy_link cp;
1964
1965 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1966 cp.reason = reason;
1967 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1968 &cp);
1969 } else {
1970 struct hci_cp_disconnect dc;
1971
1972 dc.handle = cpu_to_le16(conn->handle);
1973 dc.reason = reason;
1974 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1975 }
1976
1977 conn->state = BT_DISCONN;
1978
1979 break;
1980 case BT_CONNECT:
1981 if (conn->type == LE_LINK) {
1982 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1983 break;
1984 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1985 0, NULL);
1986 } else if (conn->type == ACL_LINK) {
1987 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1988 break;
1989 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1990 6, &conn->dst);
1991 }
1992 break;
1993 case BT_CONNECT2:
1994 if (conn->type == ACL_LINK) {
1995 struct hci_cp_reject_conn_req rej;
1996
1997 bacpy(&rej.bdaddr, &conn->dst);
1998 rej.reason = reason;
1999
2000 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2001 sizeof(rej), &rej);
2002 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2003 struct hci_cp_reject_sync_conn_req rej;
2004
2005 bacpy(&rej.bdaddr, &conn->dst);
2006
2007 /* SCO rejection has its own limited set of
2008 * allowed error values (0x0D-0x0F) which isn't
2009 * compatible with most values passed to this
2010 * function. To be safe hard-code one of the
2011 * values that's suitable for SCO.
2012 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002013 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002014
2015 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2016 sizeof(rej), &rej);
2017 }
2018 break;
2019 default:
2020 conn->state = BT_CLOSED;
2021 break;
2022 }
2023}
2024
2025static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2026{
2027 if (status)
Howard Chung22fbcfc2020-11-11 15:02:19 +08002028 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002029}
2030
2031int hci_abort_conn(struct hci_conn *conn, u8 reason)
2032{
2033 struct hci_request req;
2034 int err;
2035
2036 hci_req_init(&req, conn->hdev);
2037
2038 __hci_abort_conn(&req, conn, reason);
2039
2040 err = hci_req_run(&req, abort_conn_complete);
2041 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002042 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03002043 return err;
2044 }
2045
2046 return 0;
2047}
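/* Illustrative sketch (not part of the original file): a typical caller only
 * needs the connection and a disconnect reason, e.g.:
 *
 *	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * which builds and runs the request above; -ENODATA (an empty request) is not
 * treated as a failure.
 */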
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002048
Johan Hedberga1d01db2015-11-11 08:11:25 +02002049static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002050{
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302051 hci_req_add_le_scan_disable(req, false);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002052 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002053}
2054
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002055static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2056{
2057 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02002058 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2059 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002060 struct hci_cp_inquiry cp;
2061
Archie Pusaka06752d12021-04-01 11:11:33 +08002062 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2063 return 0;
2064
Howard Chung22fbcfc2020-11-11 15:02:19 +08002065 bt_dev_dbg(req->hdev, "");
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002066
2067 hci_dev_lock(req->hdev);
2068 hci_inquiry_cache_flush(req->hdev);
2069 hci_dev_unlock(req->hdev);
2070
2071 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002072
2073 if (req->hdev->discovery.limited)
2074 memcpy(&cp.lap, liac, sizeof(cp.lap));
2075 else
2076 memcpy(&cp.lap, giac, sizeof(cp.lap));
2077
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002078 cp.length = length;
2079
2080 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2081
2082 return 0;
2083}
2084
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002085static void le_scan_disable_work(struct work_struct *work)
2086{
2087 struct hci_dev *hdev = container_of(work, struct hci_dev,
2088 le_scan_disable.work);
2089 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002090
Howard Chung22fbcfc2020-11-11 15:02:19 +08002091 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002092
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002093 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002094 return;
2095
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002096 cancel_delayed_work(&hdev->le_scan_restart);
2097
2098 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2099 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002100 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2101 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002102 return;
2103 }
2104
2105 hdev->discovery.scan_start = 0;
2106
 2107	/* If we were running an LE-only scan, change the discovery state.
 2108	 * If we were running both LE and BR/EDR inquiry simultaneously,
 2109	 * and BR/EDR inquiry is already finished, stop discovery;
 2110	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
 2111	 * If we are going to resolve a remote device name, do not change
 2112	 * the discovery state.
 2113	 */
2114
2115 if (hdev->discovery.type == DISCOV_TYPE_LE)
2116 goto discov_stopped;
2117
2118 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2119 return;
2120
2121 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2122 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2123 hdev->discovery.state != DISCOVERY_RESOLVING)
2124 goto discov_stopped;
2125
2126 return;
2127 }
2128
2129 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2130 HCI_CMD_TIMEOUT, &status);
2131 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002132 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002133 goto discov_stopped;
2134 }
2135
2136 return;
2137
2138discov_stopped:
2139 hci_dev_lock(hdev);
2140 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2141 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002142}
2143
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002144static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002145{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002146 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002147
2148 /* If controller is not scanning we are done. */
2149 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2150 return 0;
2151
Abhishek Pandit-Subedi3a0377d2020-06-24 11:34:19 -07002152 if (hdev->scanning_paused) {
2153 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2154 return 0;
2155 }
2156
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302157 hci_req_add_le_scan_disable(req, false);
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002158
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302159 if (use_ext_scan(hdev)) {
2160 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2161
2162 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2163 ext_enable_cp.enable = LE_SCAN_ENABLE;
2164 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2165
2166 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2167 sizeof(ext_enable_cp), &ext_enable_cp);
2168 } else {
2169 struct hci_cp_le_set_scan_enable cp;
2170
2171 memset(&cp, 0, sizeof(cp));
2172 cp.enable = LE_SCAN_ENABLE;
2173 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2174 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2175 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002176
2177 return 0;
2178}
2179
2180static void le_scan_restart_work(struct work_struct *work)
2181{
2182 struct hci_dev *hdev = container_of(work, struct hci_dev,
2183 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002184 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002185 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002186
Howard Chung22fbcfc2020-11-11 15:02:19 +08002187 bt_dev_dbg(hdev, "");
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002188
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002189 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002190 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002191 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2192 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002193 return;
2194 }
2195
2196 hci_dev_lock(hdev);
2197
2198 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2199 !hdev->discovery.scan_start)
2200 goto unlock;
2201
 2202	/* When the scan was started, hdev->le_scan_disable was queued to run
 2203	 * after 'duration' from scan_start. During scan restart this work has
 2204	 * been canceled, so we need to queue it again with the proper timeout
 2205	 * to make sure that the scan does not run indefinitely.
 2206	 */
2207 duration = hdev->discovery.scan_duration;
2208 scan_start = hdev->discovery.scan_start;
2209 now = jiffies;
2210 if (now - scan_start <= duration) {
2211 int elapsed;
2212
2213 if (now >= scan_start)
2214 elapsed = now - scan_start;
2215 else
2216 elapsed = ULONG_MAX - scan_start + now;
2217
2218 timeout = duration - elapsed;
2219 } else {
2220 timeout = 0;
2221 }
2222
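	/* Note: scan_start, duration, elapsed and timeout above are all in
	 * jiffies (scan_duration is stored as the jiffies timeout computed in
	 * start_discovery()), so the result can be passed straight to
	 * queue_delayed_work() below.
	 */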
2223 queue_delayed_work(hdev->req_workqueue,
2224 &hdev->le_scan_disable, timeout);
2225
2226unlock:
2227 hci_dev_unlock(hdev);
2228}
2229
Johan Hedberge68f0722015-11-11 08:30:30 +02002230static int active_scan(struct hci_request *req, unsigned long opt)
2231{
2232 uint16_t interval = opt;
2233 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002234 u8 own_addr_type;
Archie Pusaka3d4f9c02021-06-04 16:26:27 +08002235 /* Accept list is not used for discovery */
Marcel Holtmann849c9c32020-04-09 08:05:48 +02002236 u8 filter_policy = 0x00;
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002237 /* Default is to enable duplicates filter */
2238 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Marcel Holtmanne1d57232020-07-23 18:08:57 +05302239 /* Discovery doesn't require controller address resolution */
2240 bool addr_resolv = false;
Johan Hedberge68f0722015-11-11 08:30:30 +02002241 int err;
2242
Howard Chung22fbcfc2020-11-11 15:02:19 +08002243 bt_dev_dbg(hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002244
Johan Hedberge68f0722015-11-11 08:30:30 +02002245 /* If controller is scanning, it means the background scanning is
2246 * running. Thus, we should temporarily stop it in order to set the
2247 * discovery scanning parameters.
2248 */
Howard Chung422bb172020-11-26 12:22:23 +08002249 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302250 hci_req_add_le_scan_disable(req, false);
Howard Chung422bb172020-11-26 12:22:23 +08002251 cancel_interleave_scan(hdev);
2252 }
Johan Hedberge68f0722015-11-11 08:30:30 +02002253
2254 /* All active scans will be done with either a resolvable private
2255 * address (when privacy feature has been enabled) or non-resolvable
2256 * private address.
2257 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002258 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2259 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002260 if (err < 0)
2261 own_addr_type = ADDR_LE_DEV_PUBLIC;
2262
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002263 if (hci_is_adv_monitoring(hdev)) {
2264 /* Duplicate filter should be disabled when some advertisement
2265 * monitor is activated, otherwise AdvMon can only receive one
2266 * advertisement for one peer(*) during active scanning, and
2267 * might report loss to these peers.
2268 *
2269 * Note that different controllers have different meanings of
2270 * |duplicate|. Some of them consider packets with the same
2271 * address as duplicate, and others consider packets with the
 2272		 * same address and the same RSSI as duplicate. Although in the
 2273		 * latter case we don't strictly need to disable the duplicate
 2274		 * filter, active scanning is usually run only for a short period
 2275		 * of time, so the power impact should be negligible.
2276 */
2277 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2278 }
2279
Alain Michaudd4edda02020-06-29 17:04:15 +00002280 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2281 hdev->le_scan_window_discovery, own_addr_type,
Yun-Hao Chungc32d6242021-05-20 13:12:09 +08002282 filter_policy, filter_dup, addr_resolv);
Johan Hedberge68f0722015-11-11 08:30:30 +02002283 return 0;
2284}
2285
2286static int interleaved_discov(struct hci_request *req, unsigned long opt)
2287{
2288 int err;
2289
Howard Chung22fbcfc2020-11-11 15:02:19 +08002290 bt_dev_dbg(req->hdev, "");
Johan Hedberge68f0722015-11-11 08:30:30 +02002291
2292 err = active_scan(req, opt);
2293 if (err)
2294 return err;
2295
Johan Hedberg7df26b52015-11-11 12:24:21 +02002296 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002297}
2298
2299static void start_discovery(struct hci_dev *hdev, u8 *status)
2300{
2301 unsigned long timeout;
2302
Howard Chung22fbcfc2020-11-11 15:02:19 +08002303 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002304
2305 switch (hdev->discovery.type) {
2306 case DISCOV_TYPE_BREDR:
2307 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002308 hci_req_sync(hdev, bredr_inquiry,
2309 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002310 status);
2311 return;
2312 case DISCOV_TYPE_INTERLEAVED:
2313 /* When running simultaneous discovery, the LE scanning time
 2314	 * should occupy the whole discovery time since BR/EDR inquiry
2315 * and LE scanning are scheduled by the controller.
2316 *
2317 * For interleaving discovery in comparison, BR/EDR inquiry
2318 * and LE scanning are done sequentially with separate
2319 * timeouts.
2320 */
2321 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2322 &hdev->quirks)) {
2323 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2324 /* During simultaneous discovery, we double LE scan
2325 * interval. We must leave some time for the controller
2326 * to do BR/EDR inquiry.
2327 */
2328 hci_req_sync(hdev, interleaved_discov,
Alain Michaudd4edda02020-06-29 17:04:15 +00002329 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002330 status);
2331 break;
2332 }
2333
2334 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
Alain Michaudd4edda02020-06-29 17:04:15 +00002335 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002336 HCI_CMD_TIMEOUT, status);
2337 break;
2338 case DISCOV_TYPE_LE:
2339 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
Alain Michaudd4edda02020-06-29 17:04:15 +00002340 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
Johan Hedberge68f0722015-11-11 08:30:30 +02002341 HCI_CMD_TIMEOUT, status);
2342 break;
2343 default:
2344 *status = HCI_ERROR_UNSPECIFIED;
2345 return;
2346 }
2347
2348 if (*status)
2349 return;
2350
Howard Chung22fbcfc2020-11-11 15:02:19 +08002351 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
Johan Hedberge68f0722015-11-11 08:30:30 +02002352
2353 /* When service discovery is used and the controller has a
2354 * strict duplicate filter, it is important to remember the
2355 * start and duration of the scan. This is required for
2356 * restarting scanning during the discovery phase.
2357 */
2358 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2359 hdev->discovery.result_filtering) {
2360 hdev->discovery.scan_start = jiffies;
2361 hdev->discovery.scan_duration = timeout;
2362 }
2363
2364 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2365 timeout);
2366}
2367
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002368bool hci_req_stop_discovery(struct hci_request *req)
2369{
2370 struct hci_dev *hdev = req->hdev;
2371 struct discovery_state *d = &hdev->discovery;
2372 struct hci_cp_remote_name_req_cancel cp;
2373 struct inquiry_entry *e;
2374 bool ret = false;
2375
Howard Chung22fbcfc2020-11-11 15:02:19 +08002376 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002377
2378 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2379 if (test_bit(HCI_INQUIRY, &hdev->flags))
2380 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2381
2382 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2383 cancel_delayed_work(&hdev->le_scan_disable);
Sonny Sasakac06632a2021-03-15 10:30:59 -07002384 cancel_delayed_work(&hdev->le_scan_restart);
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302385 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002386 }
2387
2388 ret = true;
2389 } else {
2390 /* Passive scanning */
2391 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +05302392 hci_req_add_le_scan_disable(req, false);
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002393 ret = true;
2394 }
2395 }
2396
2397 /* No further actions needed for LE-only discovery */
2398 if (d->type == DISCOV_TYPE_LE)
2399 return ret;
2400
2401 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2402 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2403 NAME_PENDING);
2404 if (!e)
2405 return ret;
2406
2407 bacpy(&cp.bdaddr, &e->data.bdaddr);
2408 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2409 &cp);
2410 ret = true;
2411 }
2412
2413 return ret;
2414}
2415
Kiran K9798fbd2021-09-07 15:42:44 +05302416static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2417 u16 opcode)
2418{
2419 bt_dev_dbg(hdev, "status %u", status);
2420}
2421
2422int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2423{
2424 struct hci_request req;
2425 int err;
2426 __u8 vnd_len, *vnd_data = NULL;
2427 struct hci_op_configure_data_path *cmd = NULL;
2428
2429 hci_req_init(&req, hdev);
2430
2431 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2432 &vnd_data);
2433 if (err < 0)
2434 goto error;
2435
2436 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2437 if (!cmd) {
2438 err = -ENOMEM;
2439 goto error;
2440 }
2441
2442 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2443 if (err < 0)
2444 goto error;
2445
2446 cmd->vnd_len = vnd_len;
2447 memcpy(cmd->vnd_data, vnd_data, vnd_len);
2448
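	/* Assumption (not stated in the original file): per the HCI Configure
	 * Data Path command definition, direction 0x00 is the input (host to
	 * controller) path and 0x01 the output (controller to host) path;
	 * both directions are configured with the same vendor data here.
	 */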
2449 cmd->direction = 0x00;
2450 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2451
2452 cmd->direction = 0x01;
2453 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2454
2455 err = hci_req_run(&req, config_data_path_complete);
2456error:
2457
2458 kfree(cmd);
2459 kfree(vnd_data);
2460 return err;
2461}
2462
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002463static int stop_discovery(struct hci_request *req, unsigned long opt)
2464{
2465 hci_dev_lock(req->hdev);
2466 hci_req_stop_discovery(req);
2467 hci_dev_unlock(req->hdev);
2468
2469 return 0;
2470}
2471
Johan Hedberge68f0722015-11-11 08:30:30 +02002472static void discov_update(struct work_struct *work)
2473{
2474 struct hci_dev *hdev = container_of(work, struct hci_dev,
2475 discov_update);
2476 u8 status = 0;
2477
2478 switch (hdev->discovery.state) {
2479 case DISCOVERY_STARTING:
2480 start_discovery(hdev, &status);
2481 mgmt_start_discovery_complete(hdev, status);
2482 if (status)
2483 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2484 else
2485 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2486 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002487 case DISCOVERY_STOPPING:
2488 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2489 mgmt_stop_discovery_complete(hdev, status);
2490 if (!status)
2491 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2492 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002493 case DISCOVERY_STOPPED:
2494 default:
2495 return;
2496 }
2497}
2498
Johan Hedbergc366f552015-11-23 15:43:06 +02002499static void discov_off(struct work_struct *work)
2500{
2501 struct hci_dev *hdev = container_of(work, struct hci_dev,
2502 discov_off.work);
2503
Howard Chung22fbcfc2020-11-11 15:02:19 +08002504 bt_dev_dbg(hdev, "");
Johan Hedbergc366f552015-11-23 15:43:06 +02002505
2506 hci_dev_lock(hdev);
2507
 2508	/* When the discoverable timeout triggers, just make sure
2509 * the limited discoverable flag is cleared. Even in the case
2510 * of a timeout triggered from general discoverable, it is
2511 * safe to unconditionally clear the flag.
2512 */
2513 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2514 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2515 hdev->discov_timeout = 0;
2516
2517 hci_dev_unlock(hdev);
2518
2519 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2520 mgmt_new_settings(hdev);
2521}
2522
Johan Hedberg2ff13892015-11-25 16:15:44 +02002523static int powered_update_hci(struct hci_request *req, unsigned long opt)
2524{
2525 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002526 u8 link_sec;
2527
2528 hci_dev_lock(hdev);
2529
2530 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2531 !lmp_host_ssp_capable(hdev)) {
2532 u8 mode = 0x01;
2533
2534 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2535
2536 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2537 u8 support = 0x01;
2538
2539 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2540 sizeof(support), &support);
2541 }
2542 }
2543
2544 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2545 lmp_bredr_capable(hdev)) {
2546 struct hci_cp_write_le_host_supported cp;
2547
2548 cp.le = 0x01;
2549 cp.simul = 0x00;
2550
2551 /* Check first if we already have the right
2552 * host state (host features set)
2553 */
2554 if (cp.le != lmp_host_le_capable(hdev) ||
2555 cp.simul != lmp_host_le_br_capable(hdev))
2556 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2557 sizeof(cp), &cp);
2558 }
2559
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002560 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002561 /* Make sure the controller has a good default for
2562 * advertising data. This also applies to the case
2563 * where BR/EDR was toggled during the AUTO_OFF phase.
2564 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002565 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2566 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302567 int err;
2568
2569 if (ext_adv_capable(hdev)) {
2570 err = __hci_req_setup_ext_adv_instance(req,
2571 0x00);
2572 if (!err)
2573 __hci_req_update_scan_rsp_data(req,
2574 0x00);
2575 } else {
2576 err = 0;
2577 __hci_req_update_adv_data(req, 0x00);
2578 __hci_req_update_scan_rsp_data(req, 0x00);
2579 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002580
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302581 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302582 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302583 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302584 else if (!err)
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +03002585 __hci_req_enable_ext_advertising(req,
2586 0x00);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302587 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002588 } else if (!list_empty(&hdev->adv_instances)) {
2589 struct adv_info *adv_instance;
2590
Johan Hedberg2ff13892015-11-25 16:15:44 +02002591 adv_instance = list_first_entry(&hdev->adv_instances,
2592 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002593 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002594 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002595 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002596 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002597 }
2598
2599 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2600 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2601 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2602 sizeof(link_sec), &link_sec);
2603
2604 if (lmp_bredr_capable(hdev)) {
2605 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2606 __hci_req_write_fast_connectable(req, true);
2607 else
2608 __hci_req_write_fast_connectable(req, false);
2609 __hci_req_update_scan(req);
2610 __hci_req_update_class(req);
2611 __hci_req_update_name(req);
2612 __hci_req_update_eir(req);
2613 }
2614
2615 hci_dev_unlock(hdev);
2616 return 0;
2617}
2618
2619int __hci_req_hci_power_on(struct hci_dev *hdev)
2620{
2621 /* Register the available SMP channels (BR/EDR and LE) only when
2622 * successfully powering on the controller. This late
2623 * registration is required so that LE SMP can clearly decide if
2624 * the public address or static address is used.
2625 */
2626 smp_register(hdev);
2627
2628 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2629 NULL);
2630}
2631
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002632void hci_request_setup(struct hci_dev *hdev)
2633{
Johan Hedberge68f0722015-11-11 08:30:30 +02002634 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002635 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002636 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002637 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2638 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002639 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Howard Chungc4f1f402020-11-26 12:22:21 +08002640 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002641}
2642
2643void hci_request_cancel_all(struct hci_dev *hdev)
2644{
Benjamin Berg744451c2021-12-17 16:28:09 +01002645 __hci_cmd_sync_cancel(hdev, ENODEV);
Johan Hedberg7df0f732015-11-12 15:15:00 +02002646
Johan Hedberge68f0722015-11-11 08:30:30 +02002647 cancel_work_sync(&hdev->discov_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002648 cancel_work_sync(&hdev->scan_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002649 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002650 cancel_delayed_work_sync(&hdev->le_scan_disable);
2651 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002652
2653 if (hdev->adv_instance_timeout) {
2654 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2655 hdev->adv_instance_timeout = 0;
2656 }
Howard Chungc4f1f402020-11-26 12:22:21 +08002657
2658 cancel_interleave_scan(hdev);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002659}