/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

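/* An hci_request batches HCI commands so they can be submitted to the
 * controller as a single unit. The usual flow is hci_req_init(), one or
 * more hci_req_add() calls, then hci_req_run() with an optional
 * completion callback.
 */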
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

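/* Splice the request's commands onto the device's command queue. The
 * completion callback (either the plain or the skb-returning variant)
 * is attached to the last command, so it runs once the whole request
 * has completed.
 */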
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

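/* Send a single HCI command and wait for the skb of the resulting
 * event. A minimal usage sketch (the opcode is only an illustrative
 * example, not a requirement of this API):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 *
 * Callers are expected to serialize against other synchronous requests
 * (see hci_req_sync() below).
 */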
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

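/* Like __hci_req_sync() but takes the request lock itself, so callers
 * must not already hold it.
 */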
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

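/* Allocate an skb carrying the HCI command header plus parameters; the
 * opcode is stored in little-endian byte order as the controller expects.
 */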
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

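/* Queue a command that completes with the usual Command Complete or
 * Command Status event rather than a special event.
 */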
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

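/* Returns the scan filter policy to use: 0x00 when the white list cannot
 * be used (accept all advertising), 0x01 when it can.
 */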
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

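/* Queue the commands that configure and enable LE passive scanning,
 * used for background scanning (auto-connection and device reporting).
 */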
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses, i.e.
	 * LE privacy is enabled, controllers that support Extended
	 * Scanner Filter Policies can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

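/* Decide which own-address type an upcoming LE operation should use and,
 * if a new random address is needed, queue the command that sets it.
 */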
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

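/* Enable or disable page scan (and inquiry scan when discoverable) based
 * on the connectable setting and on whether any whitelisted device is
 * currently disconnected.
 */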
void __hci_update_page_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

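/* Queue the commands needed to tear down, cancel or reject a connection,
 * depending on its current state.
 */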
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe, hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running an LE-only scan, change the
			 * discovery state. If we were running both LE and
			 * BR/EDR inquiry simultaneously, and BR/EDR inquiry
			 * is already finished, stop discovery; otherwise
			 * BR/EDR inquiry will stop discovery when finished.
			 * If we are resolving a remote device name, do not
			 * change the discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work(&hdev->le_scan_restart);

	err = hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (err)
		return;

	le_scan_disable_work_complete(hdev, status);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run at scan_start + duration. That work has been canceled for
	 * this restart, so queue it again with the remaining timeout to
	 * make sure the scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (err)
		return;

	le_scan_restart_work_complete(hdev, status);
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, opt);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry, 0, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

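/* Queue the commands needed to stop the current discovery. Returns true
 * if at least one command was queued.
 */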
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

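/* Wire up the deferred work handlers used by this file; the matching
 * cancel function below makes sure none of them is still pending.
 */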
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);
}