/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}

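/* Look up a bound socket by name; callers must hold iucv_sk_list.lock. */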
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

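/* Purge any skbs still queued when the socket is destroyed. */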
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

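/* Queue a newly created child socket on the parent's accept queue. */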
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

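/* Remove a socket from its parent's accept queue. */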
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

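/* Pull the first usable child socket off the accept queue, dropping closed
 * children on the way and grafting the result onto newsock if supplied. */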
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

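/* Sleep until the socket reaches one of the two given states, an error
 * occurs, or the timeout expires. */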
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		/* Bail out before dereferencing a failed allocation. */
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

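/* Send data on a connected socket; each sendmsg call maps to one IUCV
 * message, tagged so the transmit-completion callback can find its skb. */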
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		skb = sock_alloc_send_skb(sk, len,
					  msg->msg_flags & MSG_DONTWAIT, &err);
		if (!skb)
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)	/* IUCV rc 3: message limit reached */
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

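/* Receive data; once the receive queue drains, refill it from the
 * backlog queue of skbs that did not fit earlier. */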
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

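/* Report POLLIN on a listening socket once a child connection is ready. */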
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

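/* Compute the poll mask for an IUCV socket. */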
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

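/* Shut down one or both directions; SEND_SHUTDOWN is signalled to the
 * peer with an in-parameter-list (IPRMDATA) message. */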
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

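/* Split a message that exceeds sk_rcvbuf/4 into smaller skbs queued on
 * fragmented_skb_q. */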
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
			     struct sk_buff_head *fragmented_skb_q)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(fragmented_skb_q, nskb);
	}

	return 0;
}

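/* Called by the IUCV base layer when a message is pending on the path. */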
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb, *fskb;
	struct sk_buff_head fragmented_skb_q;
	int rc;

	skb_queue_head_init(&fragmented_skb_q);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb) {
		iucv_path_sever(path, NULL);
		return;
	}

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length,
					       &fragmented_skb_q);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}
	/* Queue the fragmented skb */
	fskb = skb_dequeue(&fragmented_skb_q);
	while (fskb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv->backlog_skb_q, fskb);
		else if (sock_queue_rcv_skb(sk, fskb))
			skb_queue_tail(&iucv->backlog_skb_q, fskb);
		fskb = skb_dequeue(&fragmented_skb_q);
	}

	/* Queue the original skb if it exists (was not fragmented) */
	if (skb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv->backlog_skb_q, skb);
		else if (sock_queue_rcv_skb(sk, skb))
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	}
}

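/* Called by the IUCV base layer once a sent message completes; frees the
 * skb whose tag matches the completed message. */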
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		/* Find the skb whose tag matches the completed message;
		 * only unlink and free it if it was actually found. */
		while (list_skb != (struct sk_buff *) list) {
			if (!memcmp(&msg->tag, list_skb->cb, 4)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING &&
	    skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);
	}
}

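/* Called by the IUCV base layer when the peer severs the path. */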
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);