blob: 9bf30db89d894456c0eb7a3b4199d98ce5ce0cf2 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Johan Hedbergfa4335d2015-03-17 13:48:50 +020033#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
Johan Hedberg801c1e82015-03-06 21:08:50 +020037static LIST_HEAD(mgmt_chan_list);
38static DEFINE_MUTEX(mgmt_chan_list_lock);
39
Marcel Holtmanncd82e612012-02-20 20:34:38 +010040static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* ----- HCI socket interface ----- */
43
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket private state for HCI sockets.
 *
 * struct bt_sock must stay the first member so that the hci_pi() cast
 * from a plain struct sock pointer is valid.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* per-socket RAW channel packet filter */
	__u32             cmsg_mask;	/* requested ancillary data items */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
55
Marcel Holtmann6befc642015-03-14 19:27:53 -070056void hci_sock_set_flag(struct sock *sk, int nr)
57{
58 set_bit(nr, &hci_pi(sk)->flags);
59}
60
61void hci_sock_clear_flag(struct sock *sk, int nr)
62{
63 clear_bit(nr, &hci_pi(sk)->flags);
64}
65
Marcel Holtmannc85be542015-03-14 19:28:00 -070066int hci_sock_test_flag(struct sock *sk, int nr)
67{
68 return test_bit(nr, &hci_pi(sk)->flags);
69}
70
Johan Hedbergd0f172b2015-03-17 13:48:46 +020071unsigned short hci_sock_get_channel(struct sock *sk)
72{
73 return hci_pi(sk)->channel;
74}
75
Jiri Slaby93919762015-02-19 15:20:43 +010076static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070077{
Jiri Slaby93919762015-02-19 15:20:43 +010078 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
81/* Security filter */
Marcel Holtmann3ad254f2014-07-11 05:36:39 +020082#define HCI_SFLT_MAX_OGF 5
83
84struct hci_sec_filter {
85 __u32 type_mask;
86 __u32 event_mask[2];
87 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
88};
89
Marcel Holtmann7e67c112014-07-11 05:36:40 +020090static const struct hci_sec_filter hci_sec_filter = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 /* Packet types */
92 0x10,
93 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020094 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 /* Commands */
96 {
97 { 0x0 },
98 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020099 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200101 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200103 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200105 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +0200107 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 }
109};
110
/* Global list of all open HCI sockets; traversals take the rwlock for
 * reading, link/unlink takes it for writing.
 */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
/* Decide whether @skb must be withheld from RAW socket @sk according to
 * the socket's packet filter.  Returns true when the packet is filtered
 * OUT (i.e. must not be delivered).
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	/* Vendor packets share filter bit 0 with "unknown" */
	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* data+3: opcode field of Command Complete (after event hdr and
	 * num_hci_cmd byte) -- assumes skb holds a full event; verify at
	 * call sites.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	/* data+4: opcode field of Command Status */
	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
154
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155/* Send frame to RAW socket */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100156void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157{
158 struct sock *sk;
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100159 struct sk_buff *skb_copy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160
161 BT_DBG("hdev %p len %d", hdev, skb->len);
162
163 read_lock(&hci_sk_list.lock);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100164
Sasha Levinb67bfe02013-02-27 17:06:00 -0800165 sk_for_each(sk, &hci_sk_list.head) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166 struct sk_buff *nskb;
167
168 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
169 continue;
170
171 /* Don't send frame to the socket it came from */
172 if (skb->sk == sk)
173 continue;
174
Marcel Holtmann23500182013-08-26 21:40:52 -0700175 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
176 if (is_filtered_packet(sk, skb))
177 continue;
178 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
179 if (!bt_cb(skb)->incoming)
180 continue;
181 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
182 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
183 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
184 continue;
185 } else {
186 /* Don't send frame to other channel types */
Johan Hedberga40c4062010-12-08 00:21:07 +0200187 continue;
Marcel Holtmann23500182013-08-26 21:40:52 -0700188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100190 if (!skb_copy) {
191 /* Create a private copy with headroom */
Octavian Purdilabad93e92014-06-12 01:36:26 +0300192 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100193 if (!skb_copy)
194 continue;
195
196 /* Put type byte before the data */
197 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
198 }
199
200 nskb = skb_clone(skb_copy, GFP_ATOMIC);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200201 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 continue;
203
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 if (sock_queue_rcv_skb(sk, nskb))
205 kfree_skb(nskb);
206 }
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100207
208 read_unlock(&hci_sk_list.lock);
Marcel Holtmanne0edf372012-02-20 14:50:36 +0100209
210 kfree_skb(skb_copy);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100211}
212
Johan Hedberg71290692015-02-20 13:26:23 +0200213/* Send frame to sockets with specific channel */
214void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700215 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100216{
217 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100218
Johan Hedberg71290692015-02-20 13:26:23 +0200219 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100220
221 read_lock(&hci_sk_list.lock);
222
Sasha Levinb67bfe02013-02-27 17:06:00 -0800223 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100224 struct sk_buff *nskb;
225
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700226 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700227 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700228 continue;
229
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100230 /* Skip the original socket */
231 if (sk == skip_sk)
232 continue;
233
234 if (sk->sk_state != BT_BOUND)
235 continue;
236
Johan Hedberg71290692015-02-20 13:26:23 +0200237 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800238 continue;
239
240 nskb = skb_clone(skb, GFP_ATOMIC);
241 if (!nskb)
242 continue;
243
244 if (sock_queue_rcv_skb(sk, nskb))
245 kfree_skb(nskb);
246 }
247
248 read_unlock(&hci_sk_list.lock);
249}
250
/* Send frame to monitor socket */
/* Mirror traffic of @hdev to all monitor-channel sockets, wrapped in a
 * hci_mon_hdr that encodes direction, packet type and controller index.
 * Cheap early-out when no monitor socket is open.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type + direction to the monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not mirrored */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
301
/* Build a monitor-channel control event skb for device lifecycle event
 * @event on @hdev (new/del index, open/close, index info).  Returns a
 * freshly allocated skb with hci_mon_hdr prepended, or NULL on failure
 * or for events the monitor does not report.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed 8-byte name field in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Monitor header carries opcode, index and payload length */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
374
/* Replay the current controller state to a freshly bound monitor
 * socket: for every registered device queue NEW_INDEX, then OPEN_INDEX
 * if the transport is running, then INDEX_INFO if the device is up.
 * Ordering per device matters so the listener sees a consistent view.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_UP, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_UP);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
414
/* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and deliver it to RAW sockets via hci_send_to_sock().  @hdev
 * may be NULL for events not tied to one controller.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so delivery filters treat it like HW traffic */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
441
/* Fan out a device lifecycle event: mirror it to the monitor channel,
 * generate a stack-internal event for RAW sockets (up/down/reg/unreg
 * range only), and on HCI_DEV_UNREG detach every socket still bound to
 * the disappearing device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* assumes HCI_DEV_REG..HCI_DEV_DOWN are the low-numbered events
	 * -- TODO confirm against the HCI_DEV_* definitions
	 */
	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference the socket held */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
487
Johan Hedberg801c1e82015-03-06 21:08:50 +0200488static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
489{
490 struct hci_mgmt_chan *c;
491
492 list_for_each_entry(c, &mgmt_chan_list, list) {
493 if (c->channel == channel)
494 return c;
495 }
496
497 return NULL;
498}
499
500static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
501{
502 struct hci_mgmt_chan *c;
503
504 mutex_lock(&mgmt_chan_list_lock);
505 c = __hci_mgmt_chan_find(channel);
506 mutex_unlock(&mgmt_chan_list_lock);
507
508 return c;
509}
510
511int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
512{
513 if (c->channel < HCI_CHANNEL_CONTROL)
514 return -EINVAL;
515
516 mutex_lock(&mgmt_chan_list_lock);
517 if (__hci_mgmt_chan_find(c->channel)) {
518 mutex_unlock(&mgmt_chan_list_lock);
519 return -EALREADY;
520 }
521
522 list_add_tail(&c->list, &mgmt_chan_list);
523
524 mutex_unlock(&mgmt_chan_list_lock);
525
526 return 0;
527}
528EXPORT_SYMBOL(hci_mgmt_chan_register);
529
/* Remove management channel @c from the registry.  Caller guarantees
 * @c was previously registered.
 */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
537
/* Release an HCI socket: undo monitor promiscuity, unlink from the
 * global socket list, tear down any USER channel exclusive access,
 * drop the device reference and free queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
583
Antti Julkub2a66aa2011-06-15 12:01:14 +0300584static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200585{
586 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300587 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200588
589 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
590 return -EFAULT;
591
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300592 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300593
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300594 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300595
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300596 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300597
598 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200599}
600
Antti Julkub2a66aa2011-06-15 12:01:14 +0300601static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200602{
603 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300604 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200605
606 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
607 return -EFAULT;
608
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300609 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300610
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300611 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300612
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300613 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300614
615 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200616}
617
/* Ioctls that require bound socket */
/* Handle ioctls that need a controller bound to the socket.  Called
 * with the socket lock held by hci_sock_ioctl().  Returns -ENOIOCTLCMD
 * for commands it does not handle.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	/* Device is claimed exclusively by a user channel */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	/* These legacy ioctls only make sense for BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode was removed; still gate the error by privilege */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
661
/* Top-level ioctl handler for HCI sockets.  Only RAW channel sockets
 * accept ioctls.  Device-independent commands are dispatched without
 * the socket lock (they take their own locks); everything else falls
 * through to hci_sock_bound_ioctl() with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the lock for commands that do not touch socket state;
	 * the returns below therefore must NOT jump to done.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Re-take the lock for commands that need the bound device */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
734
/* Bind an HCI socket to a channel (and, for RAW/USER, a controller).
 *
 * RAW: optionally grabs a device reference and bumps its promisc count.
 * USER: requires CAP_NET_ADMIN, takes exclusive ownership of a device
 *       that is not already up/being set up, and powers it on.
 * MONITOR: requires CAP_NET_RAW, replays current device state.
 * Other channels must be registered management channels.
 *
 * On success the socket moves to BT_BOUND; error paths carefully undo
 * any flag/refcount taken on that path.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel needs a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are initializing, in setup/config,
		 * or already up (unless only up due to auto-off grace).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Atomically claim exclusive access */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
912
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300913static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
914 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915{
916 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
917 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700918 struct hci_dev *hdev;
919 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920
921 BT_DBG("sock %p sk %p", sock, sk);
922
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700923 if (peer)
924 return -EOPNOTSUPP;
925
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926 lock_sock(sk);
927
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700928 hdev = hci_pi(sk)->hdev;
929 if (!hdev) {
930 err = -EBADFD;
931 goto done;
932 }
933
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934 *addr_len = sizeof(*haddr);
935 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100936 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700937 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700939done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700941 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942}
943
/* Attach ancillary data (control messages) to a message received on a
 * raw HCI socket, according to the per-socket cmsg mask configured via
 * the HCI_DATA_DIR and HCI_TIME_STAMP socket options.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	/* Report the packet direction (incoming vs outgoing) */
	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	/* Report the receive timestamp of the packet */
	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel expect the compat
		 * timeval layout unless 64-bit time is in use.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900980
Ying Xue1b784142015-03-02 15:37:48 +0800981static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
982 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983{
984 int noblock = flags & MSG_DONTWAIT;
985 struct sock *sk = sock->sk;
986 struct sk_buff *skb;
987 int copied, err;
988
989 BT_DBG("sock %p, sk %p", sock, sk);
990
991 if (flags & (MSG_OOB))
992 return -EOPNOTSUPP;
993
994 if (sk->sk_state == BT_CLOSED)
995 return 0;
996
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200997 skb = skb_recv_datagram(sk, flags, noblock, &err);
998 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 return err;
1000
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 copied = skb->len;
1002 if (len < copied) {
1003 msg->msg_flags |= MSG_TRUNC;
1004 copied = len;
1005 }
1006
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001007 skb_reset_transport_header(skb);
David S. Miller51f3d022014-11-05 16:46:40 -05001008 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009
Marcel Holtmann3a208622012-02-20 14:50:34 +01001010 switch (hci_pi(sk)->channel) {
1011 case HCI_CHANNEL_RAW:
1012 hci_sock_cmsg(sk, msg, skb);
1013 break;
Marcel Holtmann23500182013-08-26 21:40:52 -07001014 case HCI_CHANNEL_USER:
Marcel Holtmanncd82e612012-02-20 20:34:38 +01001015 case HCI_CHANNEL_MONITOR:
1016 sock_recv_timestamp(msg, sk, skb);
1017 break;
Johan Hedberg801c1e82015-03-06 21:08:50 +02001018 default:
1019 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1020 sock_recv_timestamp(msg, sk, skb);
1021 break;
Marcel Holtmann3a208622012-02-20 14:50:34 +01001022 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023
1024 skb_free_datagram(sk, skb);
1025
1026 return err ? : copied;
1027}
1028
/* Dispatch one management command received on a management channel
 * socket to the channel's handler table.
 *
 * Returns the number of consumed bytes (msglen) on success or a
 * negative error code. Protocol-level failures are reported back to
 * userspace via mgmt_cmd_status() and the resulting status of that
 * call is what gets returned here.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	/* A command must at least contain a complete header */
	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The advertised payload length must match what was received */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly marked
	 * as safe for untrusted access.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or config, or claimed by a
		 * user channel, are not available for management.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept commands that
		 * are flagged as working in that state.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Command and supplied index must agree on whether a controller
	 * is required: no_hdev commands with an index (or vice versa)
	 * are rejected.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands need at least data_len bytes of
	 * payload, fixed-length commands exactly data_len bytes.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Give the channel a chance to set up per-device state */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1139
/* Transmit one packet on an HCI socket.
 *
 * Raw and user channel packets are queued towards the controller,
 * management channels are dispatched to hci_mgmt_cmd(), and the
 * monitor channel is read-only.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum is the packet type indicator plus a command header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Everything else is looked up as a management channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The first byte carries the packet type indicator */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require the
		 * CAP_NET_RAW capability.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the
		 * command queue and are sent via the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* ACL/SCO data on a raw socket requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1264
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001265static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1266 char __user *optval, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267{
1268 struct hci_ufilter uf = { .opcode = 0 };
1269 struct sock *sk = sock->sk;
1270 int err = 0, opt = 0;
1271
1272 BT_DBG("sk %p, opt %d", sk, optname);
1273
1274 lock_sock(sk);
1275
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001276 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001277 err = -EBADFD;
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001278 goto done;
1279 }
1280
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 switch (optname) {
1282 case HCI_DATA_DIR:
1283 if (get_user(opt, (int __user *)optval)) {
1284 err = -EFAULT;
1285 break;
1286 }
1287
1288 if (opt)
1289 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1290 else
1291 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1292 break;
1293
1294 case HCI_TIME_STAMP:
1295 if (get_user(opt, (int __user *)optval)) {
1296 err = -EFAULT;
1297 break;
1298 }
1299
1300 if (opt)
1301 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1302 else
1303 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1304 break;
1305
1306 case HCI_FILTER:
Marcel Holtmann0878b662007-05-05 00:35:59 +02001307 {
1308 struct hci_filter *f = &hci_pi(sk)->filter;
1309
1310 uf.type_mask = f->type_mask;
1311 uf.opcode = f->opcode;
1312 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1313 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1314 }
1315
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 len = min_t(unsigned int, len, sizeof(uf));
1317 if (copy_from_user(&uf, optval, len)) {
1318 err = -EFAULT;
1319 break;
1320 }
1321
1322 if (!capable(CAP_NET_RAW)) {
1323 uf.type_mask &= hci_sec_filter.type_mask;
1324 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1325 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1326 }
1327
1328 {
1329 struct hci_filter *f = &hci_pi(sk)->filter;
1330
1331 f->type_mask = uf.type_mask;
1332 f->opcode = uf.opcode;
1333 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1334 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1335 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001336 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 default:
1339 err = -ENOPROTOOPT;
1340 break;
1341 }
1342
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001343done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 release_sock(sk);
1345 return err;
1346}
1347
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001348static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1349 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350{
1351 struct hci_ufilter uf;
1352 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001353 int len, opt, err = 0;
1354
1355 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
1357 if (get_user(len, optlen))
1358 return -EFAULT;
1359
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001360 lock_sock(sk);
1361
1362 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001363 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001364 goto done;
1365 }
1366
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 switch (optname) {
1368 case HCI_DATA_DIR:
1369 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1370 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001371 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 opt = 0;
1373
1374 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001375 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 break;
1377
1378 case HCI_TIME_STAMP:
1379 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1380 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001381 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 opt = 0;
1383
1384 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001385 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 break;
1387
1388 case HCI_FILTER:
1389 {
1390 struct hci_filter *f = &hci_pi(sk)->filter;
1391
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001392 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 uf.type_mask = f->type_mask;
1394 uf.opcode = f->opcode;
1395 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1396 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1397 }
1398
1399 len = min_t(unsigned int, len, sizeof(uf));
1400 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001401 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 break;
1403
1404 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001405 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 break;
1407 }
1408
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001409done:
1410 release_sock(sk);
1411 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412}
1413
/* Protocol operations for HCI sockets. Connection-oriented calls are
 * rejected via the generic sock_no_* stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1433
/* Protocol descriptor backing each HCI socket allocation */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1439
/* Create a new HCI socket. Only SOCK_RAW is supported; the socket is
 * linked into hci_sk_list and starts out unbound in state BT_OPEN.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1468
/* Address-family glue used by the Bluetooth core to create HCI sockets */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1474
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475int __init hci_sock_init(void)
1476{
1477 int err;
1478
Marcel Holtmannb0a8e282015-01-11 15:18:17 -08001479 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1480
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 err = proto_register(&hci_sk_proto, 0);
1482 if (err < 0)
1483 return err;
1484
1485 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001486 if (err < 0) {
1487 BT_ERR("HCI socket registration failed");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 goto error;
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001489 }
1490
Al Virob0316612013-04-04 19:14:33 -04001491 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
Masatake YAMATOf7c86632012-07-26 01:28:36 +09001492 if (err < 0) {
1493 BT_ERR("Failed to create HCI proc file");
1494 bt_sock_unregister(BTPROTO_HCI);
1495 goto error;
1496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 BT_INFO("HCI socket layer initialized");
1499
1500 return 0;
1501
1502error:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 proto_unregister(&hci_sk_proto);
1504 return err;
1505}
1506
/* Tear down the HCI socket layer: remove the proc entry, unregister
 * the BTPROTO_HCI socket family and drop the protocol registration,
 * in the reverse order of hci_sock_init().
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}