blob: 5460b9146dd8051ae010f35a65105803dca0fe67 [file] [log] [blame]
Thomas Gleixneree5d8f42019-05-20 19:08:06 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * X.25 Packet Layer release 002
4 *
5 * This is ALPHA test software. This code may break your machine,
6 * randomly fail to work with new releases, misbehave and/or generally
YOSHIFUJI Hideakif8e1d2012007-02-09 23:25:27 +09007 * screw up. It might even work.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * This code REQUIRES 2.1.15 or higher
10 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * History
12 * X.25 001 Jonathan Naylor Started coding.
13 * X.25 002 Jonathan Naylor New timer architecture.
YOSHIFUJI Hideakif8e1d2012007-02-09 23:25:27 +090014 * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 * negotiation.
16 * 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
17 */
18
wangweidongb73e9e32013-12-06 19:24:33 +080019#define pr_fmt(fmt) "X25: " fmt
20
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/kernel.h>
22#include <linux/jiffies.h>
23#include <linux/timer.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090024#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/netdevice.h>
26#include <linux/skbuff.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080027#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/init.h>
29#include <net/x25.h>
30
andrew hendry5595a1a2010-11-25 02:18:15 +000031LIST_HEAD(x25_neigh_list);
32DEFINE_RWLOCK(x25_neigh_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Kees Cooke99e88a2017-10-16 14:43:17 -070034static void x25_t20timer_expiry(struct timer_list *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
36static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
37static void x25_transmit_restart_request(struct x25_neigh *nb);
38
/*
 * Linux set/reset timer routines
 */

/* (Re)arm the T20 timer: how long to wait for a Restart Confirmation
 * before the Restart Request is retransmitted (see x25_t20timer_expiry).
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}
46
/* T20 expiry: the Restart Request we sent was not confirmed in time,
 * so retransmit it and re-arm the timer.  Runs in timer context; the
 * neighbour is recovered from the timer_list embedded in x25_neigh.
 */
static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
55
/* Cancel any pending T20 retransmission. */
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
60
/*
 * This handles all restart and diagnostic frames.
 *
 * Drives the restart-phase state machine for supervisory packets that
 * arrive on LCI 0 from neighbour @nb.  Whenever the link ends up in
 * X25_LINK_STATE_3 (operational), frames that were queued while the
 * link was coming up are flushed out to the device.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		switch (nb->state) {
		case X25_LINK_STATE_0:
			/* This can happen when the x25 module just gets loaded
			 * and doesn't know layer 2 has already connected
			 */
			nb->state = X25_LINK_STATE_3;
			x25_transmit_restart_confirmation(nb);
			break;
		case X25_LINK_STATE_2:
			/* We had sent a Restart Request of our own (that is
			 * how STATE_2 is entered); treat the peer's request
			 * as completing the restart and stop the T20 timer.
			 */
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_confirmation(nb);
			break;
		}
		break;

	case X25_RESTART_CONFIRMATION:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			/* Our Restart Request was confirmed: link is up. */
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			/* Unexpected confirmation on an established link:
			 * restart the link from scratch.
			 */
			x25_transmit_restart_request(nb);
			nb->state = X25_LINK_STATE_2;
			x25_start_t20timer(nb);
			break;
		}
		break;

	case X25_DIAGNOSTIC:
		/* Need the diagnostic code plus three explanation bytes
		 * (skb->data[3..6]) before reading them below.
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
128
129/*
130 * This routine is called when a Restart Request is needed
131 */
132static void x25_transmit_restart_request(struct x25_neigh *nb)
133{
134 unsigned char *dptr;
135 int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
136 struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
137
138 if (!skb)
139 return;
140
141 skb_reserve(skb, X25_MAX_L2_LEN);
142
143 dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
144
145 *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
146 *dptr++ = 0x00;
147 *dptr++ = X25_RESTART_REQUEST;
148 *dptr++ = 0x00;
149 *dptr++ = 0;
150
151 skb->sk = NULL;
152
153 x25_send_frame(skb, nb);
154}
155
156/*
157 * This routine is called when a Restart Confirmation is needed
158 */
159static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
160{
161 unsigned char *dptr;
162 int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
163 struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
164
165 if (!skb)
166 return;
167
168 skb_reserve(skb, X25_MAX_L2_LEN);
169
170 dptr = skb_put(skb, X25_STD_MIN_LEN);
171
172 *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
173 *dptr++ = 0x00;
174 *dptr++ = X25_RESTART_CONFIRMATION;
175
176 skb->sk = NULL;
177
178 x25_send_frame(skb, nb);
179}
180
181/*
182 * This routine is called when a Clear Request is needed outside of the context
183 * of a connected socket.
184 */
185void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
186 unsigned char cause)
187{
188 unsigned char *dptr;
189 int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
190 struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
191
192 if (!skb)
193 return;
194
195 skb_reserve(skb, X25_MAX_L2_LEN);
196
197 dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
198
199 *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
200 X25_GFI_EXTSEQ :
201 X25_GFI_STDSEQ);
202 *dptr++ = (lci >> 0) & 0xFF;
203 *dptr++ = X25_CLEAR_REQUEST;
204 *dptr++ = cause;
205 *dptr++ = 0x00;
206
207 skb->sk = NULL;
208
209 x25_send_frame(skb, nb);
210}
211
212void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
213{
214 switch (nb->state) {
Joe Perchesfddc5f32011-07-01 09:43:13 +0000215 case X25_LINK_STATE_0:
216 skb_queue_tail(&nb->queue, skb);
217 nb->state = X25_LINK_STATE_1;
218 x25_establish_link(nb);
219 break;
220 case X25_LINK_STATE_1:
221 case X25_LINK_STATE_2:
222 skb_queue_tail(&nb->queue, skb);
223 break;
224 case X25_LINK_STATE_3:
225 x25_send_frame(skb, nb);
226 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 }
228}
229
230/*
231 * Called when the link layer has become established.
232 */
233void x25_link_established(struct x25_neigh *nb)
234{
235 switch (nb->state) {
Joe Perchesfddc5f32011-07-01 09:43:13 +0000236 case X25_LINK_STATE_0:
Joe Perchesfddc5f32011-07-01 09:43:13 +0000237 case X25_LINK_STATE_1:
238 x25_transmit_restart_request(nb);
239 nb->state = X25_LINK_STATE_2;
240 x25_start_t20timer(nb);
241 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 }
243}
244
/*
 * Called when the link layer has terminated, or an establishment
 * request has failed.
 */

void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Drop anything still queued for transmission and stop waiting
	 * for a Restart Confirmation that can no longer arrive.
	 */
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
259
/*
 * Add a new device.
 *
 * Allocate an x25_neigh for @dev, take a reference on the device and
 * add the neighbour to x25_neigh_list.  Allocation uses GFP_ATOMIC
 * (presumably the caller may be in atomic context — TODO confirm);
 * on allocation failure the device silently gets no neighbour.
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	dev_hold(dev);	/* released in x25_link_device_down()/x25_link_free() */
	nb->dev = dev;
	nb->state = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20 = sysctl_x25_restart_request_timeout;
	/* Initial reference is owned by x25_neigh_list. */
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
291
/**
 * __x25_remove_neigh - remove neighbour from x25_neigh_list
 * @nb: - neigh to remove
 *
 * Remove neighbour from x25_neigh_list. If it was there.
 * Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	/* node.next is non-NULL once the entry has been list_add()ed,
	 * so this guards against unlinking an entry that is not on the
	 * list.
	 */
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);	/* drop the list's reference */
	}
}
306
/*
 * A device has been removed, remove its links.
 *
 * Walk x25_neigh_list under the write lock, detaching every neighbour
 * bound to @dev and dropping the device reference that was taken in
 * x25_link_device_up().
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}
328
329/*
330 * Given a device, return the neighbour address.
331 */
332struct x25_neigh *x25_get_neigh(struct net_device *dev)
333{
334 struct x25_neigh *nb, *use = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335
336 read_lock_bh(&x25_neigh_list_lock);
Wang Hai3835a662021-06-08 08:05:05 +0000337 list_for_each_entry(nb, &x25_neigh_list, node) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 if (nb->dev == dev) {
339 use = nb;
340 break;
341 }
342 }
343
344 if (use)
345 x25_neigh_hold(use);
346 read_unlock_bh(&x25_neigh_list_lock);
347 return use;
348}
349
/*
 * Handle the ioctls that control the subscription functions.
 *
 * SIOCX25GSUBSCRIP copies the neighbour's extended-sequencing flag and
 * global facility mask out to userspace; SIOCX25SSUBSCRIP sets them.
 * Returns 0 on success, -EINVAL for a bad command, unknown device or
 * invalid "extended" value, and -EFAULT on a failed userspace copy.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	/* The neighbour holds its own device reference (taken in
	 * x25_link_device_up()), so ours can be dropped now.
	 */
	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* Only 0 and 1 are accepted for "extended". */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
400
401
/*
 * Release all memory associated with X.25 neighbour structures.
 *
 * Module-exit teardown: unlink and release every neighbour on
 * x25_neigh_list, dropping the device reference each one holds.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		/* Save the device pointer first: __x25_remove_neigh() may
		 * drop the final reference and free nb.
		 */
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}