/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
22
Michał Narajowski1b422062016-10-05 12:28:27 +020023#include <asm/unaligned.h>
24
Johan Hedbergb5044302015-11-10 09:44:55 +020025#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
26#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
Johan Hedbergbe91cd02015-11-10 09:44:54 +020027
/* Builder for a batch of HCI commands.  Commands are accumulated on
 * cmd_q via hci_req_add()/hci_req_add_ev() and then submitted to the
 * controller as one unit with hci_req_run()/hci_req_run_skb().
 */
struct hci_request {
	struct hci_dev *hdev;		/* controller this request targets */
	struct sk_buff_head cmd_q;	/* queued HCI command packets */

	/* If something goes wrong when building the HCI request, the error
	 * value is stored in this field.  hci_req_run() can then refuse to
	 * submit a request that failed during construction.
	 */
	int err;
};
37
38void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
Jaganath Kanakkasseryf17d8582017-10-25 10:58:48 +053039void hci_req_purge(struct hci_request *req);
João Paulo Rechi Vitaf80c5da2019-05-02 10:01:52 +080040bool hci_req_status_pend(struct hci_dev *hdev);
Johan Hedberg0857dd32014-12-19 13:40:20 +020041int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
Johan Hedberge62144872015-04-02 13:41:08 +030042int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
Johan Hedberg0857dd32014-12-19 13:40:20 +020043void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
44 const void *param);
45void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
46 const void *param, u8 event);
Johan Hedberge62144872015-04-02 13:41:08 +030047void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
48 hci_req_complete_t *req_complete,
49 hci_req_complete_skb_t *req_complete_skb);
Johan Hedberg0857dd32014-12-19 13:40:20 +020050
Johan Hedberga1d01db2015-11-11 08:11:25 +020051int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
52 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +020053 unsigned long opt, u32 timeout, u8 *hci_status);
Johan Hedberga1d01db2015-11-11 08:11:25 +020054int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
55 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +020056 unsigned long opt, u32 timeout, u8 *hci_status);
Johan Hedbergb5044302015-11-10 09:44:55 +020057void hci_req_sync_cancel(struct hci_dev *hdev, int err);
Johan Hedbergbe91cd02015-11-10 09:44:54 +020058
Johan Hedberg0857dd32014-12-19 13:40:20 +020059struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
60 const void *param);
61
Johan Hedberg2ff13892015-11-25 16:15:44 +020062int __hci_req_hci_power_on(struct hci_dev *hdev);
63
Johan Hedbergbf943cb2015-11-25 16:15:43 +020064void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
Johan Hedberg00cf5042015-11-25 16:15:41 +020065void __hci_req_update_name(struct hci_request *req);
Johan Hedbergb1a89172015-11-25 16:15:42 +020066void __hci_req_update_eir(struct hci_request *req);
Johan Hedberg00cf5042015-11-25 16:15:41 +020067
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +053068void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
Johan Hedberg0857dd32014-12-19 13:40:20 +020069void hci_req_add_le_passive_scan(struct hci_request *req);
70
Abhishek Pandit-Subedi9952d902020-03-11 08:54:00 -070071void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
72
Sathish Narasimman5c49bcc2020-07-23 18:09:01 +053073void hci_req_disable_address_resolution(struct hci_dev *hdev);
Johan Hedbergf2252572015-11-18 12:49:20 +020074void hci_req_reenable_advertising(struct hci_dev *hdev);
75void __hci_req_enable_advertising(struct hci_request *req);
76void __hci_req_disable_advertising(struct hci_request *req);
Johan Hedbergcab054a2015-11-30 11:21:45 +020077void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
78int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
79void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
Johan Hedbergf2252572015-11-18 12:49:20 +020080
81int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
82 bool force);
Johan Hedberg37d3a1f2016-08-28 20:53:34 +030083void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
84 struct hci_request *req, u8 instance,
85 bool force);
Johan Hedbergf2252572015-11-18 12:49:20 +020086
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +053087int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +053088int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
Luiz Augusto von Dentz1d0fac22019-06-03 13:48:42 +030089int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
Daniel Winkler37adf702020-07-14 14:16:00 -070090int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance);
91int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance);
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +053092void __hci_req_clear_ext_adv_sets(struct hci_request *req);
Jaganath Kanakkasserya73c0462018-07-19 17:09:45 +053093int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
94 bool use_rpa, struct adv_info *adv_instance,
95 u8 *own_addr_type, bdaddr_t *rand_addr);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +053096
Johan Hedberg14bf5ea2015-11-22 19:00:22 +020097void __hci_req_update_class(struct hci_request *req);
98
Johan Hedberg2154d3f2015-11-11 08:30:45 +020099/* Returns true if HCI commands were queued */
100bool hci_req_stop_discovery(struct hci_request *req);
101
/* Defer a scan-setting update to hdev's request workqueue.  The actual
 * HCI traffic is generated later by the scan_update work item (which
 * ends up calling __hci_req_update_scan()); safe to call from contexts
 * that cannot run HCI requests synchronously.
 */
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->scan_update);
}
106
107void __hci_req_update_scan(struct hci_request *req);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200108
109int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200110 bool use_rpa, u8 *own_addr_type);
Johan Hedberg2cf22212014-12-19 22:26:00 +0200111
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +0300112int hci_abort_conn(struct hci_conn *conn, u8 reason);
113void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
114 u8 reason);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +0200115
/* Defer a background (LE passive) scan update to hdev's request
 * workqueue via the bg_scan_update work item, instead of issuing the
 * HCI commands directly in the caller's context.
 */
static inline void hci_update_background_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
}
120
Johan Hedberg5fc16cc2015-11-11 08:11:16 +0200121void hci_request_setup(struct hci_dev *hdev);
122void hci_request_cancel_all(struct hci_dev *hdev);
Michał Narajowski1b422062016-10-05 12:28:27 +0200123
Michał Narajowskif61851f2016-10-19 10:20:27 +0200124u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
125
Michał Narajowski1b422062016-10-05 12:28:27 +0200126static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
127 u8 *data, u8 data_len)
128{
129 eir[eir_len++] = sizeof(type) + data_len;
130 eir[eir_len++] = type;
131 memcpy(&eir[eir_len], data, data_len);
132 eir_len += data_len;
133
134 return eir_len;
135}
136
137static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
138{
139 eir[eir_len++] = sizeof(type) + sizeof(data);
140 eir[eir_len++] = type;
141 put_unaligned_le16(data, &eir[eir_len]);
142 eir_len += sizeof(data);
143
144 return eir_len;
145}