blob: 54118868b3f67cf62cce93166fc4a3ff12896261 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
Gustavo Padovan8c520a52012-05-23 04:04:22 -030027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
Marcel Holtmanncd82e612012-02-20 20:34:38 +010032#include <net/bluetooth/hci_mon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
/* Registered management channel handlers and the mutex guarding the list */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Count of open monitor sockets; frames are only mirrored to the
 * monitor channel while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039/* ----- HCI socket interface ----- */
40
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;		/* must stay first: generic BT socket */
	struct hci_dev *hdev;		/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* per-socket filter (raw channel) */
	__u32 cmsg_mask;		/* requested ancillary data (dir/tstamp) */
	unsigned short channel;		/* HCI_CHANNEL_* the socket bound to */
	unsigned long flags;		/* HCI_SOCK_*/HCI_MGMT_* flag bits */
};
52
Marcel Holtmann6befc642015-03-14 19:27:53 -070053void hci_sock_set_flag(struct sock *sk, int nr)
54{
55 set_bit(nr, &hci_pi(sk)->flags);
56}
57
58void hci_sock_clear_flag(struct sock *sk, int nr)
59{
60 clear_bit(nr, &hci_pi(sk)->flags);
61}
62
Jiri Slaby93919762015-02-19 15:20:43 +010063static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070064{
Jiri Slaby93919762015-02-19 15:20:43 +010065 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -070066}
67
68/* Security filter */
Marcel Holtmann3ad254f2014-07-11 05:36:39 +020069#define HCI_SFLT_MAX_OGF 5
70
71struct hci_sec_filter {
72 __u32 type_mask;
73 __u32 event_mask[2];
74 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
75};
76
Marcel Holtmann7e67c112014-07-11 05:36:40 +020077static const struct hci_sec_filter hci_sec_filter = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070078 /* Packet types */
79 0x10,
80 /* Events */
Marcel Holtmanndd7f5522005-10-28 19:20:53 +020081 { 0x1000d9fe, 0x0000b00c },
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 /* Commands */
83 {
84 { 0x0 },
85 /* OGF_LINK_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020086 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070087 /* OGF_LINK_POLICY */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020088 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070089 /* OGF_HOST_CTL */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020090 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 /* OGF_INFO_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020092 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070093 /* OGF_STATUS_PARAM */
Marcel Holtmann7c631a62007-09-09 08:39:43 +020094 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 }
96};
97
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
101
Marcel Holtmannf81fe642013-08-25 23:25:15 -0700102static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
103{
104 struct hci_filter *flt;
105 int flt_type, flt_event;
106
107 /* Apply filter */
108 flt = &hci_pi(sk)->filter;
109
110 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
111 flt_type = 0;
112 else
113 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
114
115 if (!test_bit(flt_type, &flt->type_mask))
116 return true;
117
118 /* Extra filter for event packets only */
119 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
120 return false;
121
122 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
123
124 if (!hci_test_bit(flt_event, &flt->event_mask))
125 return true;
126
127 /* Check filter only when opcode is set */
128 if (!flt->opcode)
129 return false;
130
131 if (flt_event == HCI_EV_CMD_COMPLETE &&
132 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
133 return true;
134
135 if (flt_event == HCI_EV_CMD_STATUS &&
136 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
137 return true;
138
139 return false;
140}
141
/* Send frame to RAW socket.
 *
 * Walks all bound HCI sockets and queues a copy of @skb to every raw or
 * user-channel socket bound to @hdev, honouring the per-socket filter.
 * A single private copy (with the packet-type byte pushed in front) is
 * created lazily on first use and then shared between receivers via
 * skb_clone().
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event/data packets */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue ownership passes on success; free on failure */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference to the shared copy (may be NULL) */
	kfree_skb(skb_copy);
}
199
Johan Hedberg71290692015-02-20 13:26:23 +0200200/* Send frame to sockets with specific channel */
201void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
202 struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100203{
204 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100205
Johan Hedberg71290692015-02-20 13:26:23 +0200206 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100207
208 read_lock(&hci_sk_list.lock);
209
Sasha Levinb67bfe02013-02-27 17:06:00 -0800210 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100211 struct sk_buff *nskb;
212
213 /* Skip the original socket */
214 if (sk == skip_sk)
215 continue;
216
217 if (sk->sk_state != BT_BOUND)
218 continue;
219
Johan Hedberg71290692015-02-20 13:26:23 +0200220 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800221 continue;
222
223 nskb = skb_clone(skb, GFP_ATOMIC);
224 if (!nskb)
225 continue;
226
227 if (sock_queue_rcv_skb(sk, nskb))
228 kfree_skb(nskb);
229 }
230
231 read_unlock(&hci_sk_list.lock);
232}
233
Marcel Holtmann17711c62015-03-14 19:27:54 -0700234/* Send frame to sockets with specific channel flag set */
235void hci_send_to_flagged_channel(unsigned short channel, struct sk_buff *skb,
236 int flag)
237{
238 struct sock *sk;
239
240 BT_DBG("channel %u len %d", channel, skb->len);
241
242 read_lock(&hci_sk_list.lock);
243
244 sk_for_each(sk, &hci_sk_list.head) {
245 struct sk_buff *nskb;
246
247 if (!test_bit(flag, &hci_pi(sk)->flags))
248 continue;
249
250 if (sk->sk_state != BT_BOUND)
251 continue;
252
253 if (hci_pi(sk)->channel != channel)
254 continue;
255
256 nskb = skb_clone(skb, GFP_ATOMIC);
257 if (!nskb)
258 continue;
259
260 if (sock_queue_rcv_skb(sk, nskb))
261 kfree_skb(nskb);
262 }
263
264 read_unlock(&hci_sk_list.lock);
265}
266
/* Send frame to monitor socket.
 *
 * Mirrors @skb to the monitor channel wrapped in a hci_mon_hdr carrying
 * the opcode (derived from packet type and direction), controller index
 * and payload length.  Does nothing while no monitor socket is open.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Cheap early-out: avoid all copying while nobody is listening */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data packets) to the monitor
	 * opcode; unknown packet types are not forwarded.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
	kfree_skb(skb_copy);
}
316
/* Build a monitor-channel skb announcing a controller lifecycle @event.
 *
 * HCI_DEV_REG produces a HCI_MON_NEW_INDEX message with device details;
 * HCI_DEV_UNREG a bodyless HCI_MON_DEL_INDEX.  Returns NULL for other
 * events or on allocation failure.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Fixed-width name field in the wire format */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Monitor header goes in front of the (possibly empty) payload */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
360
361static void send_monitor_replay(struct sock *sk)
362{
363 struct hci_dev *hdev;
364
365 read_lock(&hci_dev_list_lock);
366
367 list_for_each_entry(hdev, &hci_dev_list, list) {
368 struct sk_buff *skb;
369
370 skb = create_monitor_event(hdev, HCI_DEV_REG);
371 if (!skb)
372 continue;
373
374 if (sock_queue_rcv_skb(sk, skb))
375 kfree_skb(skb);
376 }
377
378 read_unlock(&hci_dev_list_lock);
379}
380
/* Generate internal stack event.
 *
 * Fabricates an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and delivers it to raw sockets via hci_send_to_sock().  @hdev
 * may be NULL for events not tied to a controller.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so raw sockets accept and timestamp it */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
407
/* Notify sockets about a controller lifecycle @event (register,
 * unregister, up, down).  The event is mirrored to the monitor channel,
 * broadcast as an internal stack event, and on HCI_DEV_UNREG every
 * socket still bound to @hdev is forcibly detached.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Wake readers with EPIPE; socket reverts
				 * to unbound state.
				 */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
450
Johan Hedberg801c1e82015-03-06 21:08:50 +0200451static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
452{
453 struct hci_mgmt_chan *c;
454
455 list_for_each_entry(c, &mgmt_chan_list, list) {
456 if (c->channel == channel)
457 return c;
458 }
459
460 return NULL;
461}
462
463static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
464{
465 struct hci_mgmt_chan *c;
466
467 mutex_lock(&mgmt_chan_list_lock);
468 c = __hci_mgmt_chan_find(channel);
469 mutex_unlock(&mgmt_chan_list_lock);
470
471 return c;
472}
473
474int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
475{
476 if (c->channel < HCI_CHANNEL_CONTROL)
477 return -EINVAL;
478
479 mutex_lock(&mgmt_chan_list_lock);
480 if (__hci_mgmt_chan_find(c->channel)) {
481 mutex_unlock(&mgmt_chan_list_lock);
482 return -EALREADY;
483 }
484
485 list_add_tail(&c->list, &mgmt_chan_list);
486
487 mutex_unlock(&mgmt_chan_list_lock);
488
489 return 0;
490}
491EXPORT_SYMBOL(hci_mgmt_chan_register);
492
/* Remove a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
500
/* Release an HCI socket: drop monitor/promisc accounting, unlink from
 * the global socket list, hand a user-channel controller back to the
 * management interface, and free queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* Return exclusive controller to mgmt and close it */
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		/* Drop the promisc count and device ref taken at bind time */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
537
/* HCIBLOCKADDR helper: copy a bdaddr from userspace and add it to the
 * controller's blacklist (BR/EDR address type).
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
554
/* HCIUNBLOCKADDR helper: copy a bdaddr from userspace and remove it
 * from the controller's blacklist (BR/EDR address type).
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
571
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held.  Rejects devices claimed by a user
 * channel, unconfigured devices, and non-BR/EDR device types before
 * dispatching the command.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * capability check for compatible error reporting.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
615
/* Top-level ioctl handler for HCI sockets.
 *
 * Only raw-channel sockets accept ioctls.  Commands that do not need a
 * bound device are dispatched after releasing the socket lock (note the
 * direct returns); everything else is re-locked and handed to
 * hci_sock_bound_ioctl().
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below do not touch per-socket state */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the socket lock and a bound device */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
688
/* Bind an HCI socket to a channel (and, for raw/user channels, to a
 * controller).
 *
 * Raw: optionally grabs a device reference and bumps its promisc count.
 * User: takes exclusive ownership of a down device (CAP_NET_ADMIN),
 * removing it from mgmt and opening it; every error path unwinds the
 * flag/refcount in reverse order.  Monitor: CAP_NET_RAW, replays the
 * current index state.  Other channels must be registered mgmt channels
 * and require CAP_NET_ADMIN.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses are zero-padded */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel needs a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Device must be fully down and configured before it can
		 * be handed over exclusively.
		 */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Atomically claim the device for this socket */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			/* Unwind in reverse order of the steps above */
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		/* Since the access to control channels is currently
		 * restricted to CAP_NET_ADMIN capabilities, every
		 * socket is implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
852
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300853static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
854 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855{
856 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
857 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700858 struct hci_dev *hdev;
859 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860
861 BT_DBG("sock %p sk %p", sock, sk);
862
Marcel Holtmann06f43cb2013-08-26 00:06:30 -0700863 if (peer)
864 return -EOPNOTSUPP;
865
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866 lock_sock(sk);
867
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700868 hdev = hci_pi(sk)->hdev;
869 if (!hdev) {
870 err = -EBADFD;
871 goto done;
872 }
873
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 *addr_len = sizeof(*haddr);
875 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +0100876 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700877 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700879done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -0700881 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882}
883
/* Attach requested ancillary data (direction and/or timestamp control
 * messages) for @skb to @msg, according to the socket's cmsg_mask.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel expect the compat
		 * timeval layout instead of the native one.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900920
/* Receive one queued frame from an HCI socket.
 *
 * Dequeues a single datagram, copies up to @len bytes to userspace
 * (setting MSG_TRUNC if the frame is larger), and attaches channel
 * specific ancillary data: raw sockets get direction/timestamp cmsgs
 * per their cmsg_mask, while user/monitor/mgmt channels get the
 * standard socket receive timestamp.
 *
 * Returns the number of bytes copied, 0 if the socket is closed, or a
 * negative error code.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		/* Frame is larger than the userspace buffer; deliver a
		 * truncated copy and flag it.
		 */
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		/* Registered management channels also get timestamps */
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* Report the copy error if any, otherwise the copied length */
	return err ? : copied;
}
968
/* Send one frame on an HCI socket.
 *
 * The first byte of the user buffer is the HCI packet type indicator;
 * the rest is the frame itself (hence the minimum length of 4 covers
 * type + a minimal command/data header).  Routing depends on the bound
 * channel:
 *
 *  - mgmt channels: the frame is handed to the channel's mgmt handler
 *    under mgmt_chan_list_lock and never reaches a controller queue.
 *  - monitor channel: sending is not supported (-EOPNOTSUPP).
 *  - user channel: any valid packet type goes straight to the raw
 *    queue; permissions were already checked at bind time.
 *  - raw channel: commands are checked against the security filter
 *    (or CAP_NET_RAW); vendor commands (OGF 0x3f) bypass the command
 *    queue, other commands are queued as single-command requests, and
 *    non-command packets require CAP_NET_RAW.
 *
 * Returns the number of bytes consumed or a negative error code.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* 1 byte packet type + smallest possible frame, capped at the
	 * maximum HCI frame size.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Anything else must be a registered mgmt channel */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = mgmt_control(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* The leading byte selects the packet type; strip it off */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue commands allowed by
		 * the security filter.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and go directly to the driver.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req_start = 1;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO injection requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1093
/* Set raw-channel socket options.
 *
 * HCI_DATA_DIR and HCI_TIME_STAMP toggle the corresponding cmsg_mask
 * bits consumed by hci_sock_cmsg().  HCI_FILTER installs an event
 * filter; the current filter is read first so that a short
 * copy_from_user() leaves unspecified fields at their current values,
 * and unprivileged callers are restricted to the security filter's
 * allowed types and events.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	/* These options only exist on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Seed uf with the current filter so a partial copy from
		 * userspace keeps the remaining fields unchanged.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged users may not widen the filter beyond what
		 * the security filter permits.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1176
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001177static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1178 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179{
1180 struct hci_ufilter uf;
1181 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001182 int len, opt, err = 0;
1183
1184 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185
1186 if (get_user(len, optlen))
1187 return -EFAULT;
1188
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001189 lock_sock(sk);
1190
1191 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001192 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001193 goto done;
1194 }
1195
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 switch (optname) {
1197 case HCI_DATA_DIR:
1198 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1199 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001200 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 opt = 0;
1202
1203 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001204 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 break;
1206
1207 case HCI_TIME_STAMP:
1208 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1209 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001210 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 opt = 0;
1212
1213 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001214 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 break;
1216
1217 case HCI_FILTER:
1218 {
1219 struct hci_filter *f = &hci_pi(sk)->filter;
1220
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001221 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 uf.type_mask = f->type_mask;
1223 uf.opcode = f->opcode;
1224 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1225 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1226 }
1227
1228 len = min_t(unsigned int, len, sizeof(uf));
1229 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001230 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 break;
1232
1233 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001234 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 break;
1236 }
1237
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001238done:
1239 release_sock(sk);
1240 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241}
1242
/* Protocol operations for HCI sockets.  HCI sockets are datagram-like:
 * no listen/accept/connect semantics, polling via the generic datagram
 * helper, and all real work done in the bind/sendmsg/recvmsg/ioctl and
 * [gs]etsockopt handlers above.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1262
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1268
/* Create a new HCI socket.
 *
 * Only SOCK_RAW is supported.  Allocates the sock, initialises the
 * generic socket data, marks the socket usable (clears SOCK_ZAPPED)
 * and links it into the global HCI socket list.  Channel/device
 * binding happens later in hci_sock_bind().
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
1297
/* Family operations registered with the Bluetooth core for BTPROTO_HCI */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1303
/* Register the HCI socket layer: the protocol, the BTPROTO_HCI socket
 * family and the /proc entry.  Each step is rolled back in reverse
 * order on failure.  Returns 0 on success or a negative error code.
 */
int __init hci_sock_init(void)
{
	int err;

	/* struct sockaddr_hci must fit in the generic sockaddr passed
	 * through the socket syscalls.
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
1335
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): procfs entry, socket family, then protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}