blob: 133d7ea063fb134ea4712cb55106704796d26345 [file] [log] [blame]
Thomas Gleixner97fb5e82019-05-29 07:17:58 -07001// SPDX-License-Identifier: GPL-2.0-only
Jukka Rissanen18722c22013-12-11 17:05:37 +02002/*
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03003 Copyright (c) 2013-2014 Intel Corp.
Jukka Rissanen18722c22013-12-11 17:05:37 +02004
Jukka Rissanen18722c22013-12-11 17:05:37 +02005*/
6
Jukka Rissanen18722c22013-12-11 17:05:37 +02007#include <linux/if_arp.h>
8#include <linux/netdevice.h>
9#include <linux/etherdevice.h>
Jukka Rissanen5547e482014-06-18 16:37:09 +030010#include <linux/module.h>
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +030011#include <linux/debugfs.h>
Jukka Rissanen18722c22013-12-11 17:05:37 +020012
13#include <net/ipv6.h>
14#include <net/ip6_route.h>
15#include <net/addrconf.h>
Luiz Augusto von Dentz814f1b22017-04-11 22:21:02 +030016#include <net/pkt_sched.h>
Jukka Rissanen18722c22013-12-11 17:05:37 +020017
Jukka Rissanen18722c22013-12-11 17:05:37 +020018#include <net/bluetooth/bluetooth.h>
19#include <net/bluetooth/hci_core.h>
20#include <net/bluetooth/l2cap.h>
21
Alexander Aringcefc8c82014-03-05 14:29:05 +010022#include <net/6lowpan.h> /* for the compression support */
Jukka Rissanen18722c22013-12-11 17:05:37 +020023
#define VERSION "0.1"

/* debugfs handles for the module's control files; names suggest
 * "6lowpan_enable" and "6lowpan_control" — the creation site is outside
 * this chunk, verify there.
 */
static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

/* Interface name template: devices are named bt0, bt1, ... */
#define IFACE_NAME_TEMPLATE "bt%d"
/* Per-packet state stored in skb->cb while a packet traverses this module. */
struct skb_cb {
	struct in6_addr addr;	/* IPv6 destination address of the packet */
	struct in6_addr gw;	/* next-hop address cached by peer_lookup_dst()
				 * because bt_xmit() runs without routing info
				 */
	struct l2cap_chan *chan;	/* channel chosen for unicast TX;
					 * NULL for multicast destinations
					 */
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
37
38/* The devices list contains those devices that we are acting
39 * as a proxy. The BT 6LoWPAN device is a virtual device that
40 * connects to the Bluetooth LE device. The real connection to
41 * BT device is done via l2cap layer. There exists one
42 * virtual device / one BT 6LoWPAN network (=hciX device).
43 * The list contains struct lowpan_dev elements.
44 */
45static LIST_HEAD(bt_6lowpan_devices);
Jukka Rissanen90305822014-10-28 17:16:47 +020046static DEFINE_SPINLOCK(devices_lock);
Jukka Rissanen18722c22013-12-11 17:05:37 +020047
/* Global on/off switch for BT 6LoWPAN; false (disabled) by default.
 * NOTE(review): toggled at runtime — presumably via the debugfs entries
 * declared above; the toggle code is outside this chunk, confirm there.
 */
static bool enable_6lowpan;

/* We are listening incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;
/* NOTE(review): appears to serialize updates to listen_chan/enable_6lowpan;
 * acquisition sites are outside this chunk — confirm.
 */
static DEFINE_MUTEX(set_lock);
/* One remote BT LE device reachable through a 6LoWPAN interface. */
struct lowpan_peer {
	struct list_head list;		/* linked into lowpan_btle_dev.peers (RCU) */
	struct rcu_head rcu;		/* deferred free via kfree_rcu() in peer_del() */
	struct l2cap_chan *chan;	/* data channel to this peer */

	/* peer addresses in various formats */
	unsigned char lladdr[ETH_ALEN];	/* EUI-48 link-layer addr: byte-swapped
					 * bdaddr (see baswap() in add_peer_chan())
					 */
	struct in6_addr peer_addr;	/* IPv6 addr derived from lladdr via
					 * lowpan_iphc_uncompress_eui48_lladdr()
					 */
};
64
/* One virtual 6LoWPAN network interface; one per hciX controller. */
struct lowpan_btle_dev {
	struct list_head list;		/* linked into bt_6lowpan_devices (RCU) */

	struct hci_dev *hdev;		/* controller backing this interface */
	struct net_device *netdev;	/* the btN net_device */
	struct list_head peers;		/* lowpan_peer list, RCU protected */
	atomic_t peer_count;		/* number of items in peers list */

	struct work_struct delete_netdev;	/* deferred netdev teardown —
						 * queued outside this chunk, verify
						 */
	struct delayed_work notify_peers;	/* delayed neighbour advertisement
						 * (do_notify_peers)
						 */
};
76
Alexander Aring2e4d60c2016-04-11 11:04:18 +020077static inline struct lowpan_btle_dev *
78lowpan_btle_dev(const struct net_device *netdev)
Jukka Rissanen18722c22013-12-11 17:05:37 +020079{
Alexander Aring2e4d60c2016-04-11 11:04:18 +020080 return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
Jukka Rissanen18722c22013-12-11 17:05:37 +020081}
82
/* Publish @peer on @dev's RCU-protected peer list and bump the count.
 * Caller holds devices_lock (see add_peer_chan()).
 */
static inline void peer_add(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}
89
Alexander Aring2e4d60c2016-04-11 11:04:18 +020090static inline bool peer_del(struct lowpan_btle_dev *dev,
91 struct lowpan_peer *peer)
Jukka Rissanen18722c22013-12-11 17:05:37 +020092{
Jukka Rissanen90305822014-10-28 17:16:47 +020093 list_del_rcu(&peer->list);
Johan Hedberg4e790222014-11-11 14:16:29 +020094 kfree_rcu(peer, rcu);
Jukka Rissanen18722c22013-12-11 17:05:37 +020095
Jukka Rissanen18d93c12014-06-18 16:37:10 +030096 module_put(THIS_MODULE);
97
Jukka Rissanen18722c22013-12-11 17:05:37 +020098 if (atomic_dec_and_test(&dev->peer_count)) {
99 BT_DBG("last peer");
100 return true;
101 }
102
103 return false;
104}
105
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200106static inline struct lowpan_peer *
107__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300108{
Jukka Rissanen90305822014-10-28 17:16:47 +0200109 struct lowpan_peer *peer;
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300110
Jukka Rissanen90305822014-10-28 17:16:47 +0200111 list_for_each_entry_rcu(peer, &dev->peers, list) {
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300112 if (peer->chan == chan)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200113 return peer;
114 }
115
116 return NULL;
117}
118
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200119static inline struct lowpan_peer *
120__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200121{
Jukka Rissanen90305822014-10-28 17:16:47 +0200122 struct lowpan_peer *peer;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200123
Jukka Rissanen90305822014-10-28 17:16:47 +0200124 list_for_each_entry_rcu(peer, &dev->peers, list) {
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300125 if (peer->chan->conn == conn)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200126 return peer;
127 }
128
129 return NULL;
130}
131
/* Resolve which connected peer should receive a packet for @daddr.
 *
 * Next-hop selection, in order:
 *   1. the skb's routing entry, if present (the result is cached in
 *      lowpan_cb(skb)->gw because bt_xmit() runs without routing info);
 *   2. a previously cached gateway in lowpan_cb(skb)->gw;
 *   3. @daddr itself (assumed to be a directly connected peer).
 *
 * The next-hop is matched first against each peer's derived link-local
 * address, then against the neighbour cache to also cover addresses the
 * peer assigned itself via SLAAC.
 *
 * Returns the matching peer or NULL.
 * NOTE(review): the peer pointer is returned after rcu_read_unlock();
 * its lifetime past that point relies on caller-side serialization —
 * verify at the call sites.
 */
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);
	const struct in6_addr *nexthop;
	struct lowpan_peer *peer;
	struct neighbour *neigh;

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	if (!rt) {
		if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
			/* There is neither route nor gateway,
			 * probably the destination is a direct peer.
			 */
			nexthop = daddr;
		} else {
			/* There is a known gateway
			 */
			nexthop = &lowpan_cb(skb)->gw;
		}
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %u ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	/* use the neighbour cache for matching addresses assigned by SLAAC */
	neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
	if (neigh) {
		list_for_each_entry_rcu(peer, &dev->peers, list) {
			if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
				neigh_release(neigh);
				rcu_read_unlock();
				return peer;
			}
		}
		neigh_release(neigh);
	}

	rcu_read_unlock();

	return NULL;
}
197
Jukka Rissanen18722c22013-12-11 17:05:37 +0200198static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
199{
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200200 struct lowpan_btle_dev *entry;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200201 struct lowpan_peer *peer = NULL;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200202
Jukka Rissanen90305822014-10-28 17:16:47 +0200203 rcu_read_lock();
Jukka Rissanen18722c22013-12-11 17:05:37 +0200204
Jukka Rissanen90305822014-10-28 17:16:47 +0200205 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
206 peer = __peer_lookup_conn(entry, conn);
Jukka Rissanen18722c22013-12-11 17:05:37 +0200207 if (peer)
208 break;
209 }
210
Jukka Rissanen90305822014-10-28 17:16:47 +0200211 rcu_read_unlock();
Jukka Rissanen18722c22013-12-11 17:05:37 +0200212
213 return peer;
214}
215
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200216static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200217{
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200218 struct lowpan_btle_dev *entry;
219 struct lowpan_btle_dev *dev = NULL;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200220
Jukka Rissanen90305822014-10-28 17:16:47 +0200221 rcu_read_lock();
Jukka Rissanen18722c22013-12-11 17:05:37 +0200222
Jukka Rissanen90305822014-10-28 17:16:47 +0200223 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
Jukka Rissanen18722c22013-12-11 17:05:37 +0200224 if (conn->hcon->hdev == entry->hdev) {
225 dev = entry;
226 break;
227 }
228 }
229
Jukka Rissanen90305822014-10-28 17:16:47 +0200230 rcu_read_unlock();
Jukka Rissanen18722c22013-12-11 17:05:37 +0200231
232 return dev;
233}
234
Jukka Rissanen18722c22013-12-11 17:05:37 +0200235static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
236{
237 struct sk_buff *skb_cp;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200238
239 skb_cp = skb_copy(skb, GFP_ATOMIC);
240 if (!skb_cp)
Martin Townsendf8b36172014-10-23 15:40:53 +0100241 return NET_RX_DROP;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200242
Alexander Aring324e7862015-10-27 08:35:24 +0100243 return netif_rx_ni(skb_cp);
Jukka Rissanen18722c22013-12-11 17:05:37 +0200244}
245
Martin Townsend01141232014-10-23 15:40:56 +0100246static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
Luiz Augusto von Dentz27ce68a2017-04-03 17:48:55 +0300247 struct lowpan_peer *peer)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200248{
Patrik Flyktc259d142017-03-12 10:19:33 +0200249 const u8 *saddr;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200250
Luiz Augusto von Dentzfa09ae62017-03-12 10:19:37 +0200251 saddr = peer->lladdr;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200252
Luiz Augusto von Dentzfa09ae62017-03-12 10:19:37 +0200253 return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
Jukka Rissanen18722c22013-12-11 17:05:37 +0200254}
255
/* Process one frame received from @peer and deliver it to the IPv6 stack.
 *
 * Two accepted encapsulations (first byte of the payload):
 *   - uncompressed IPv6 dispatch: strip the 1-byte dispatch, re-copy the
 *     skb so the IPv6 header is aligned, and push it up;
 *   - IPHC-compressed: clone, decompress in place, and push it up.
 * Anything else is dropped.
 *
 * On success both the delivered copy and the original skb are consumed
 * here; on failure the caller keeps ownership of @skb (only rx_dropped
 * is bumped). Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct lowpan_peer *peer)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		/* rx_bytes counts the payload after the dispatch byte was
		 * pulled above.
		 */
		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, peer);
		if (ret < 0) {
			BT_DBG("iphc_decompress failed: %d", ret);
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev)
		    != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		BT_DBG("unknown packet type");
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}
342
343/* Packet from BT LE device */
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300344static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200345{
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200346 struct lowpan_btle_dev *dev;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200347 struct lowpan_peer *peer;
348 int err;
349
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300350 peer = lookup_peer(chan->conn);
Jukka Rissanen18722c22013-12-11 17:05:37 +0200351 if (!peer)
352 return -ENOENT;
353
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300354 dev = lookup_dev(chan->conn);
Johan Hedberg30d3db42013-12-12 09:53:21 +0200355 if (!dev || !dev->netdev)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200356 return -ENOENT;
357
Luiz Augusto von Dentz27ce68a2017-04-03 17:48:55 +0300358 err = recv_pkt(skb, dev->netdev, peer);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300359 if (err) {
360 BT_DBG("recv pkt %d", err);
361 err = -EAGAIN;
362 }
Jukka Rissanen18722c22013-12-11 17:05:37 +0200363
364 return err;
365}
366
/* Prepare an outgoing skb: pick the destination peer, compress the IPv6
 * header (IPHC) and build the link-layer header.
 *
 * For unicast, fills @peer_addr/@peer_addr_type and stashes the chosen
 * channel in lowpan_cb(skb)->chan; for multicast the channel is NULL and
 * the compressor gets a NULL destination.
 *
 * Returns <0 on error, 0 for a multicast packet, 1 for unicast
 * (contract relied upon by bt_xmit()).
 */
static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct ipv6hdr *hdr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	u8 *daddr;
	int err, status = 0;

	hdr = ipv6_hdr(skb);

	dev = lowpan_btle_dev(netdev);

	/* Copy the address out: compression below rewrites the header. */
	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
		daddr = NULL;
	} else {
		BT_DBG("dest IP %pI6c", &ipv6_daddr);

		/* The packet might be sent to 6lowpan interface
		 * because of routing (either via default route
		 * or user set route) so get peer according to
		 * the destination address.
		 */
		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
		if (!peer) {
			BT_DBG("no such peer");
			return -ENOENT;
		}

		daddr = peer->lladdr;
		*peer_addr = peer->chan->dst;
		*peer_addr_type = peer->chan->dst_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}
416
417static int header_create(struct sk_buff *skb, struct net_device *netdev,
418 unsigned short type, const void *_daddr,
419 const void *_saddr, unsigned int len)
420{
Jukka Rissanen36b3dd22014-09-29 16:37:25 +0300421 if (type != ETH_P_IPV6)
422 return -EINVAL;
423
Jukka Rissanen36b3dd22014-09-29 16:37:25 +0300424 return 0;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200425}
426
/* Packet to BT LE device */
/* Push one skb out over @chan via l2cap_chan_send().
 * Returns 0 on success (TX stats updated), or the negative/zero value
 * from l2cap_chan_send() otherwise. The caller retains ownership of
 * @skb; only chan->data borrows it (see comment below).
 */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		/* Positive return is the number of bytes queued. */
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (err < 0)
		netdev->stats.tx_errors++;

	return err;
}
458
Jukka Rissanen9c238ca2014-10-01 15:59:15 +0300459static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
Jukka Rissanen18722c22013-12-11 17:05:37 +0200460{
461 struct sk_buff *local_skb;
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200462 struct lowpan_btle_dev *entry;
Jukka Rissanen9c238ca2014-10-01 15:59:15 +0300463 int err = 0;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200464
Jukka Rissanen90305822014-10-28 17:16:47 +0200465 rcu_read_lock();
Jukka Rissanen18722c22013-12-11 17:05:37 +0200466
Jukka Rissanen90305822014-10-28 17:16:47 +0200467 list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
468 struct lowpan_peer *pentry;
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200469 struct lowpan_btle_dev *dev;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200470
471 if (entry->netdev != netdev)
472 continue;
473
Alexander Aring2e4d60c2016-04-11 11:04:18 +0200474 dev = lowpan_btle_dev(entry->netdev);
Jukka Rissanen18722c22013-12-11 17:05:37 +0200475
Jukka Rissanen90305822014-10-28 17:16:47 +0200476 list_for_each_entry_rcu(pentry, &dev->peers, list) {
Jukka Rissanen9c238ca2014-10-01 15:59:15 +0300477 int ret;
478
Jukka Rissanen18722c22013-12-11 17:05:37 +0200479 local_skb = skb_clone(skb, GFP_ATOMIC);
480
Kai Ye658d5d82021-06-03 15:40:58 +0800481 BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
Jukka Rissanen36b3dd22014-09-29 16:37:25 +0300482 netdev->name,
483 &pentry->chan->dst, pentry->chan->dst_type,
484 &pentry->peer_addr, pentry->chan);
Jukka Rissanen9c238ca2014-10-01 15:59:15 +0300485 ret = send_pkt(pentry->chan, local_skb, netdev);
486 if (ret < 0)
487 err = ret;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200488
489 kfree_skb(local_skb);
490 }
491 }
492
Jukka Rissanen90305822014-10-28 17:16:47 +0200493 rcu_read_unlock();
Jukka Rissanen9c238ca2014-10-01 15:59:15 +0300494
495 return err;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200496}
497
/* ndo_start_xmit: transmit one IPv6 packet over BT 6LoWPAN.
 * Unshares the skb, lets setup_header() compress it and choose the peer,
 * then sends unicast over the selected channel or fans out to all peers
 * for multicast. The skb is always consumed here.
 */
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 * <0 - error, packet is dropped
	 * 0 - this is a multicast packet
	 * 1 - this is unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}
545
/* ndo_init: give this netdev its own lockdep classes — standard pattern
 * (presumably to avoid false lockdep reports when devices are stacked;
 * see netdev_lockdep_set_classes()).
 */
static int bt_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);

	return 0;
}
552
/* net_device callbacks for the btN interfaces. */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= bt_dev_init,
	.ndo_start_xmit		= bt_xmit,
};
557
/* Link-layer header callbacks; header_create() only validates ETH_P_IPV6. */
static const struct header_ops header_ops = {
	.create	= header_create,
};
561
/* alloc_netdev() setup callback: configure the btN device defaults. */
static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;
	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	/* Core frees the netdev on unregister; no custom destructor. */
	dev->needs_free_netdev	= true;
}
574
/* Device type exposed to userspace (sysfs) for btN interfaces. */
static struct device_type bt_type = {
	.name	= "bluetooth",
};
578
Jukka Rissanen18722c22013-12-11 17:05:37 +0200579static void ifup(struct net_device *netdev)
580{
581 int err;
582
583 rtnl_lock();
Petr Machata00f54e62018-12-06 17:05:36 +0000584 err = dev_open(netdev, NULL);
Jukka Rissanen18722c22013-12-11 17:05:37 +0200585 if (err < 0)
586 BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
587 rtnl_unlock();
588}
589
/* Bring the interface administratively down under the RTNL lock. */
static void ifdown(struct net_device *netdev)
{
	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}
596
/* Delayed work (dev->notify_peers, scheduled from add_peer_chan()). */
static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
						   notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}
604
605static bool is_bt_6lowpan(struct hci_conn *hcon)
606{
607 if (hcon->type != LE_LINK)
608 return false;
609
Jukka Rissanen7b2ed602015-01-08 17:00:55 +0200610 if (!enable_6lowpan)
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300611 return false;
612
613 return true;
Jukka Rissanen18722c22013-12-11 17:05:37 +0200614}
615
/* Allocate an L2CAP channel preconfigured for 6LoWPAN: connection
 * oriented, LE flow-control mode, 1280-byte MTU (the IPv6 minimum MTU).
 * Returns NULL on allocation failure.
 */
static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->imtu = 1280;

	return chan;
}
632
/* Attach @chan as a new peer of @dev and schedule a neighbour
 * advertisement. @new_netdev tells us whether @dev was just created, in
 * which case its notify_peers work must still be initialized.
 * Returns the channel on success, NULL on allocation failure.
 */
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_btle_dev *dev,
					bool new_netdev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* bdaddr is little-endian; the EUI-48 lladdr is big-endian. */
	baswap((void *)peer->lladdr, &chan->dst);

	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	if (new_netdev)
		INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}
662
/* Create and register the btN net_device for the controller behind
 * @chan. On success *dev points at the device's private state and the
 * device is published on bt_6lowpan_devices. Returns 0 or a negative
 * errno (on registration failure the netdev is unlinked and freed).
 */
static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
	struct net_device *netdev;
	bdaddr_t addr;
	int err;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	/* Our own (byte-swapped) controller address is the hardware addr. */
	netdev->addr_assign_type = NET_ADDR_PERM;
	baswap(&addr, &chan->src);
	__dev_addr_set(netdev, &addr, sizeof(addr));

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_btle_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}
713
/* L2CAP "channel ready" callback, invoked once the 6LoWPAN data channel
 * is established.  Creates the bt%d netdev on the first connection for
 * this hci conn, attaches the peer to it and brings the interface up.
 */
static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;
	bool new_netdev = false;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
		/* Tell add_peer_chan() this peer brought a brand-new netdev
		 * with it.
		 */
		new_netdev = true;
	}

	/* Pin the module while at least one peer exists. */
	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev, new_netdev);
	ifup(dev->netdev);
}
737
Johan Hedberg2b293492014-08-07 10:03:32 +0300738static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300739{
Johan Hedberg2b293492014-08-07 10:03:32 +0300740 struct l2cap_chan *chan;
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300741
Johan Hedberg630ef792015-10-06 13:03:22 +0300742 chan = chan_create();
743 if (!chan)
744 return NULL;
745
Johan Hedberg2b293492014-08-07 10:03:32 +0300746 chan->ops = pchan->ops;
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300747
748 BT_DBG("chan %p pchan %p", chan, pchan);
749
Johan Hedberg2b293492014-08-07 10:03:32 +0300750 return chan;
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300751}
752
/* Deferred netdev teardown, run from the system workqueue because
 * unregistering a netdev may sleep.
 */
static void delete_netdev(struct work_struct *work)
{
	struct lowpan_btle_dev *entry = container_of(work,
						     struct lowpan_btle_dev,
						     delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}
763
/* L2CAP close callback: detach the peer bound to @chan from its device
 * and, when it was the device's last peer, take the interface down and
 * schedule removal of the netdev.
 */
static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_btle_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %u", chan,
			       kref_read(&chan->kref));

			/* Drop the reference taken when the peer was added. */
			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		/* Unlock before the sleeping calls below; the device is no
		 * longer reachable through the peer we just removed.
		 */
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			/* Defer unregistration to process context. */
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}
}
818
/* L2CAP state-change callback: nothing to do here beyond tracing. */
static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}
824
/* L2CAP skb allocation callback used for outgoing packets.
 * @hdr_len + @len gives the total buffer size; @nb is unused here.
 */
static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					  unsigned long hdr_len,
					  unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}
835
836static void chan_suspend_cb(struct l2cap_chan *chan)
837{
Luiz Augusto von Dentzf183e522017-04-11 22:21:00 +0300838 struct lowpan_btle_dev *dev;
839
Michael Scott6dea44f2017-03-28 23:10:18 -0700840 BT_DBG("chan %p suspend", chan);
Luiz Augusto von Dentzf183e522017-04-11 22:21:00 +0300841
842 dev = lookup_dev(chan->conn);
843 if (!dev || !dev->netdev)
844 return;
845
846 netif_stop_queue(dev->netdev);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300847}
848
849static void chan_resume_cb(struct l2cap_chan *chan)
850{
Luiz Augusto von Dentzf183e522017-04-11 22:21:00 +0300851 struct lowpan_btle_dev *dev;
852
Michael Scott6dea44f2017-03-28 23:10:18 -0700853 BT_DBG("chan %p resume", chan);
Luiz Augusto von Dentzf183e522017-04-11 22:21:00 +0300854
855 dev = lookup_dev(chan->conn);
856 if (!dev || !dev->netdev)
857 return;
858
859 netif_wake_queue(dev->netdev);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300860}
861
/* L2CAP send-timeout callback: use the generic connection timeout. */
static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}
866
/* Channel operations shared by the listening channel and all data
 * channels of the 6LoWPAN IPSP PSM.  Callbacks this driver does not
 * need are wired to the generic l2cap_chan_no_* stubs.
 */
static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};
883
/* Initiate an outgoing 6LoWPAN (IPSP PSM) connection to @addr of
 * @dst_type.  Returns the l2cap_chan_connect() result; the channel
 * reference is dropped again on failure.
 */
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}
904
/* Close the 6LoWPAN data channel of the peer attached to @conn.
 * Returns -ENOENT when no peer is known for the connection.
 * @dst_type is only used for tracing here.
 */
static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %u", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}
921
/* Create and register the listening channel for incoming 6LoWPAN
 * connections on the IPSP PSM.  Returns NULL when 6LoWPAN support is
 * disabled or when channel creation/PSM registration fails.
 */
static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	/* Mark as a parent channel for lockdep nesting purposes. */
	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %u", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}
952
953static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
954 struct l2cap_conn **conn)
955{
956 struct hci_conn *hcon;
957 struct hci_dev *hdev;
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300958 int n;
959
960 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
961 &addr->b[5], &addr->b[4], &addr->b[3],
962 &addr->b[2], &addr->b[1], &addr->b[0],
963 addr_type);
964
965 if (n < 7)
966 return -EINVAL;
967
Johan Hedberg39385cb2016-11-12 17:03:07 +0200968 /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
969 hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300970 if (!hdev)
971 return -ENOENT;
972
973 hci_dev_lock(hdev);
Johan Hedbergf5ad4ff2015-10-21 18:03:02 +0300974 hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300975 hci_dev_unlock(hdev);
976
977 if (!hcon)
978 return -ENOENT;
979
980 *conn = (struct l2cap_conn *)hcon->l2cap_data;
981
Kai Ye658d5d82021-06-03 15:40:58 +0800982 BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300983
984 return 0;
985}
986
/* Close the data channel of every known peer on every 6LoWPAN device.
 * Works on a private copy of the peer list because chan_close_cb()
 * (triggered by l2cap_chan_close()) modifies the per-device lists.
 */
static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			/* GFP_ATOMIC: allocation inside an RCU read section. */
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		/* Free the temporary copies made above. */
		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}
1026
/* Work item carrying a pending enable/disable request from debugfs. */
struct set_enable {
	struct work_struct work;	/* executed by do_enable_set() */
	bool flag;			/* requested enable_6lowpan value */
};
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03001031
/* Worker applying an enable/disable request: tears down existing peers
 * when disabling (or when the state actually changes) and re-creates
 * the listening channel under set_lock.  Frees the request when done.
 */
static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	/* set_lock serializes listen_chan updates against the debugfs
	 * control write path.
	 */
	mutex_lock(&set_lock);
	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();
	mutex_unlock(&set_lock);

	kfree(set_enable);
}
1056
Jukka Rissanen7b2ed602015-01-08 17:00:55 +02001057static int lowpan_enable_set(void *data, u64 val)
Jukka Rissanen90305822014-10-28 17:16:47 +02001058{
Jukka Rissanen7b2ed602015-01-08 17:00:55 +02001059 struct set_enable *set_enable;
Jukka Rissanen90305822014-10-28 17:16:47 +02001060
Jukka Rissanen7b2ed602015-01-08 17:00:55 +02001061 set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
1062 if (!set_enable)
Jukka Rissanen90305822014-10-28 17:16:47 +02001063 return -ENOMEM;
1064
Jukka Rissanen7b2ed602015-01-08 17:00:55 +02001065 set_enable->flag = !!val;
1066 INIT_WORK(&set_enable->work, do_enable_set);
Jukka Rissanen90305822014-10-28 17:16:47 +02001067
Jukka Rissanen7b2ed602015-01-08 17:00:55 +02001068 schedule_work(&set_enable->work);
Jukka Rissanen90305822014-10-28 17:16:47 +02001069
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03001070 return 0;
1071}
1072
/* debugfs "6lowpan_enable" read handler. */
static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}
1078
/* Attribute-style fops for 6lowpan_enable; paired with
 * debugfs_create_file_unsafe() below.
 */
DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			 lowpan_enable_set, "%llu\n");
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03001081
1082static ssize_t lowpan_control_write(struct file *fp,
1083 const char __user *user_buffer,
1084 size_t count,
1085 loff_t *position)
1086{
1087 char buf[32];
1088 size_t buf_size = min(count, sizeof(buf) - 1);
1089 int ret;
1090 bdaddr_t addr;
1091 u8 addr_type;
1092 struct l2cap_conn *conn = NULL;
1093
1094 if (copy_from_user(buf, user_buffer, buf_size))
1095 return -EFAULT;
1096
1097 buf[buf_size] = '\0';
1098
1099 if (memcmp(buf, "connect ", 8) == 0) {
1100 ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
1101 if (ret == -EINVAL)
1102 return ret;
1103
Lihong Kouf9c70bd2020-06-23 20:28:41 +08001104 mutex_lock(&set_lock);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03001105 if (listen_chan) {
1106 l2cap_chan_close(listen_chan, 0);
1107 l2cap_chan_put(listen_chan);
1108 listen_chan = NULL;
1109 }
Lihong Kouf9c70bd2020-06-23 20:28:41 +08001110 mutex_unlock(&set_lock);
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03001111
1112 if (conn) {
1113 struct lowpan_peer *peer;
1114
1115 if (!is_bt_6lowpan(conn->hcon))
1116 return -EINVAL;
1117
1118 peer = lookup_peer(conn);
1119 if (peer) {
1120 BT_DBG("6LoWPAN connection already exists");
1121 return -EALREADY;
1122 }
1123
Kai Ye658d5d82021-06-03 15:40:58 +08001124 BT_DBG("conn %p dst %pMR type %d user %u", conn,
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03001125 &conn->hcon->dst, conn->hcon->dst_type,
1126 addr_type);
1127 }
1128
1129 ret = bt_6lowpan_connect(&addr, addr_type);
1130 if (ret < 0)
1131 return ret;
1132
1133 return count;
1134 }
1135
1136 if (memcmp(buf, "disconnect ", 11) == 0) {
1137 ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
1138 if (ret < 0)
1139 return ret;
1140
1141 ret = bt_6lowpan_disconnect(conn, addr_type);
1142 if (ret < 0)
1143 return ret;
1144
1145 return count;
1146 }
1147
1148 return count;
1149}
1150
/* debugfs "6lowpan_control" read handler: list every connected peer
 * (destination address and type) across all 6LoWPAN devices.
 */
static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}
1168
/* seq_file open hook for the 6lowpan_control debugfs file. */
static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}
1173
/* File operations for 6lowpan_control: seq_file read plus the custom
 * command write handler.
 */
static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1181
/* Module-exit helper: take down and unregister every 6LoWPAN netdev.
 * Works on a private copy of the device list because unregistration
 * triggers device_event(), which edits bt_6lowpan_devices itself.
 */
static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		/* GFP_ATOMIC: allocation inside an RCU read section. */
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}
1217
/* Netdevice notifier: on NETDEV_UNREGISTER of one of our 6LoWPAN
 * interfaces, drop its entry from the global device list.  The entry
 * memory itself lives in the netdev's private area and is released by
 * the netdev destructor, so no kfree() here.
 */
static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	/* Only react to our own interface type. */
	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}
1244
/* Notifier block registered with the netdevice notifier chain. */
static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};
1248
/* Module init: create the two debugfs control files and hook into the
 * netdevice notifier chain.  debugfs_create_file_unsafe() pairs with
 * DEFINE_DEBUGFS_ATTRIBUTE() above.
 */
static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
							   0644, bt_debugfs,
							   NULL,
							   &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}
1261
/* Module exit: remove debugfs files, close the listener, tear down all
 * devices and unhook the netdevice notifier.
 *
 * NOTE(review): listen_chan is read here without set_lock, and a
 * do_enable_set() work item queued just before exit is not explicitly
 * cancelled — presumably module refcounting prevents the race; confirm.
 */
static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}
Jukka Rissanen5547e482014-06-18 16:37:09 +03001276
1277module_init(bt_6lowpan_init);
1278module_exit(bt_6lowpan_exit);
1279
1280MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
1281MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
1282MODULE_VERSION(VERSION);
1283MODULE_LICENSE("GPL");