// SPDX-License-Identifier: GPL-2.0-only

#include <linux/if_arp.h>

#include <net/6lowpan.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>

#include "6lowpan_i.h"

#define LOWPAN_DISPATCH_FIRST		0xc0
#define LOWPAN_DISPATCH_FRAG_MASK	0xf8

#define LOWPAN_DISPATCH_NALP		0x00
#define LOWPAN_DISPATCH_ESC		0x40
#define LOWPAN_DISPATCH_HC1		0x42
#define LOWPAN_DISPATCH_DFF		0x43
#define LOWPAN_DISPATCH_BC0		0x50
#define LOWPAN_DISPATCH_MESH		0x80

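/* Hand a fully decompressed IPv6 packet to the network stack and update
 * the 6LoWPAN interface rx statistics.
 */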
static int lowpan_give_skb_to_device(struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);
	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	return netif_rx(skb);
}

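/* Map the result of the rx handler chain to a NET_RX_* return value.
 * RX_CONTINUE means no handler recognized the dispatch; the skb is freed
 * for RX_CONTINUE and RX_DROP_UNUSABLE, while RX_DROP expects the skb to
 * have been consumed elsewhere already.
 */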
static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
{
	switch (res) {
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		/* fall-through */
	case RX_DROP_UNUSABLE:
		kfree_skb(skb);

		/* fall-through */
	case RX_DROP:
		return NET_RX_DROP;
	case RX_QUEUED:
		return lowpan_give_skb_to_device(skb);
	default:
		break;
	}

	return NET_RX_DROP;
}

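/* FRAG1/FRAGN dispatch checks per RFC 4944: the upper five bits select the
 * fragmentation header type (11000xxx for FRAG1, 11100xxx for FRAGN).
 */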
static inline bool lowpan_is_frag1(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
}

static inline bool lowpan_is_fragn(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
}

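/* Handle FRAG1/FRAGN dispatches by passing the skb to the 6LoWPAN
 * reassembly code. lowpan_frag_rcv() returns 1 once a datagram has been
 * completely reassembled; otherwise the fragment is queued or freed there.
 */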
static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
{
	int ret;

	if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
	      lowpan_is_fragn(*skb_network_header(skb))))
		return RX_CONTINUE;

	ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
			      LOWPAN_DISPATCH_FRAG_MASK);
	if (ret == 1)
		return RX_QUEUED;

	/* The packet is freed by lowpan_frag_rcv on error or has been
	 * queued into the frag bucket.
	 */
	return RX_DROP;
}

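/* Peek the link-layer addresses from the IEEE 802.15.4 MAC header and use
 * them as context for decompressing the IPHC-encoded IPv6 header in place.
 */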
int lowpan_iphc_decompress(struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		return -EINVAL;

	return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
}

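/* Handle the IPHC dispatch: decompress the header of an unfragmented
 * datagram and queue the resulting IPv6 packet.
 */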
static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	/* Setting the datagram size (d_size) to zero indicates non-fragment
	 * handling while doing lowpan_header_decompress.
	 */
	lowpan_802154_cb(skb)->d_size = 0;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP_UNUSABLE;

	return RX_QUEUED;
}

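/* Handle the uncompressed IPv6 dispatch: the IPv6 header follows verbatim
 * after the 1-byte dispatch value.
 */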
lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
{
	if (!lowpan_is_ipv6(*skb_network_header(skb)))
		return RX_CONTINUE;

	/* Pull off the 1-byte uncompressed IPv6 dispatch header. */
	skb_pull(skb, 1);
	return RX_QUEUED;
}

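/* The following handlers recognize the ESC, HC1, DFF, BC0 and MESH
 * dispatches but only log a rate-limited warning and drop the frame,
 * since these dispatch types are not supported.
 */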
static inline bool lowpan_is_esc(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_ESC;
}

static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
{
	if (!lowpan_is_esc(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN ESC not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_hc1(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_HC1;
}

static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
{
	if (!lowpan_is_hc1(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN HC1 not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_dff(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_DFF;
}

static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
{
	if (!lowpan_is_dff(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN DFF not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_bc0(u8 dispatch)
{
	return dispatch == LOWPAN_DISPATCH_BC0;
}

static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
{
	if (!lowpan_is_bc0(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN BC0 not supported");

	return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_mesh(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
}

static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
{
	if (!lowpan_is_mesh(*skb_network_header(skb)))
		return RX_CONTINUE;

	net_warn_ratelimited("%s: %s\n", skb->dev->name,
			     "6LoWPAN MESH not supported");

	return RX_DROP_UNUSABLE;
}

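/* Run the rx handlers until one of them accepts the dispatch; each handler
 * returns RX_CONTINUE if the frame is not for it.
 */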
static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* most likely dispatches first */
	CALL_RXH(lowpan_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_frag);
	CALL_RXH(lowpan_rx_h_ipv6);
	CALL_RXH(lowpan_rx_h_esc);
	CALL_RXH(lowpan_rx_h_hc1);
	CALL_RXH(lowpan_rx_h_dff);
	CALL_RXH(lowpan_rx_h_bc0);
	CALL_RXH(lowpan_rx_h_mesh);

rxh_next:
	return lowpan_rx_handlers_result(skb, res);
#undef CALL_RXH
}

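/* NALP ("Not a LoWPAN frame", first two bits 00) per RFC 4944; such frames
 * must not be processed as 6LoWPAN.
 */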
static inline bool lowpan_is_nalp(u8 dispatch)
{
	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
}

/* Look up reserved dispatch values at:
 * https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
 *
 * Last Updated: 2015-01-22
 */
static inline bool lowpan_is_reserved(u8 dispatch)
{
	return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
		(dispatch >= 0x51 && dispatch <= 0x5F) ||
		(dispatch >= 0xc8 && dispatch <= 0xdf) ||
		dispatch >= 0xe8);
}

/* lowpan_rx_h_check checks generic 6LoWPAN requirements
 * in the MAC and 6LoWPAN headers.
 *
 * Don't manipulate the skb here, it could be a shared buffer.
 */
static inline bool lowpan_rx_h_check(struct sk_buff *skb)
{
	__le16 fc = ieee802154_get_fc_from_skb(skb);

	/* check for an IEEE 802.15.4 conformant 6LoWPAN header */
	if (!ieee802154_is_data(fc) ||
	    !ieee802154_skb_is_intra_pan_addressing(fc, skb))
		return false;

	/* check if we can dereference the dispatch */
	if (unlikely(!skb->len))
		return false;

	if (lowpan_is_nalp(*skb_network_header(skb)) ||
	    lowpan_is_reserved(*skb_network_header(skb)))
		return false;

	return true;
}

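/* packet_type receive hook for ETH_P_IEEE802154 frames: validate the frame,
 * look up the 6LoWPAN device attached to the wpan device, make the skb
 * private where the handlers will modify it and invoke the rx handlers.
 */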
static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
		      struct packet_type *pt, struct net_device *orig_wdev)
{
	struct net_device *ldev;

	if (wdev->type != ARPHRD_IEEE802154 ||
	    skb->pkt_type == PACKET_OTHERHOST ||
	    !lowpan_rx_h_check(skb))
		goto drop;

	ldev = wdev->ieee802154_ptr->lowpan_dev;
	if (!ldev || !netif_running(ldev))
		goto drop;

	/* Replacing skb->dev and the following rx handlers will manipulate
	 * the skb.
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;
	skb->dev = ldev;

	/* When receiving FRAG1 it is likely that we will manipulate the
	 * buffer. When receiving IPHC we manipulate the data buffer. So we
	 * need to unshare the buffer.
	 */
	if (lowpan_is_frag1(*skb_network_header(skb)) ||
	    lowpan_is_iphc(*skb_network_header(skb))) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			goto out;
	}

	return lowpan_invoke_rx_handlers(skb);

drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

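/* Register/unregister the 6LoWPAN receive hook for IEEE 802.15.4 frames. */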
static struct packet_type lowpan_packet_type = {
	.type = htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};

void lowpan_rx_init(void)
{
	dev_add_pack(&lowpan_packet_type);
}

void lowpan_rx_exit(void)
{
	dev_remove_pack(&lowpan_packet_type);
}