/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INET_COMMON_H
#define _INET_COMMON_H

#include <linux/indirect_call_wrapper.h>

extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;

/*
 *	INET4 prototypes used by INET6
 */

struct msghdr;
struct sock;
struct sockaddr;
struct socket;

int inet_release(struct socket *sock);
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags);
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			  int addr_len, int flags, int is_sendmsg);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern);
int inet_send_prepare(struct sock *sk);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
		      size_t size, int flags);
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
void inet_sock_destruct(struct sock *sk);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
/* Don't allocate port at this moment, defer to connect. */
#define BIND_FORCE_ADDRESS_NO_PORT	(1 << 0)
/* Grab and release socket lock. */
#define BIND_WITH_LOCK			(1 << 1)
/* Called from BPF program. */
#define BIND_FROM_BPF			(1 << 2)
/* Skip CAP_NET_BIND_SERVICE check. */
#define BIND_NO_CAP_NET_BIND_SERVICE	(1 << 3)
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
		u32 flags);
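/*
 * Illustrative sketch only (not part of the original header): a caller of
 * __inet_bind() passes a bitmask of the BIND_* flags defined above.  The
 * exact combinations used by in-tree callers may differ; this merely shows
 * how the flags are meant to be OR-ed together.
 *
 *	err = __inet_bind(sk, uaddr, addr_len, BIND_WITH_LOCK);
 *	err = __inet_bind(sk, uaddr, addr_len,
 *			  BIND_FROM_BPF | BIND_FORCE_ADDRESS_NO_PORT);
 */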
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int peer);
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
			 unsigned short type, unsigned char protocol,
			 struct net *net);
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
		    int *addr_len);

struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
int inet_gro_complete(struct sk_buff *skb, int nhoff);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
				 netdev_features_t features);

static inline void inet_ctl_sock_destroy(struct sock *sk)
{
	if (sk)
		sock_release(sk->sk_socket);
}
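/*
 * Illustrative sketch only (an assumption, not taken from this header):
 * control sockets are typically created per network namespace with
 * inet_ctl_sock_create() and paired with inet_ctl_sock_destroy() on
 * teardown.  The protocol values below are just an example.
 *
 *	struct sock *ctl_sk;
 *	int err;
 *
 *	err = inet_ctl_sock_create(&ctl_sk, PF_INET, SOCK_RAW,
 *				   IPPROTO_ICMP, net);
 *	if (err)
 *		return err;
 *	...
 *	inet_ctl_sock_destroy(ctl_sk);
 */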

/*
 * Call the gro_receive callback @cb, trying a direct call when it is one
 * of the two likely targets f2/f1 (via INDIRECT_CALL_2).  If GRO recursion
 * runs too deep, flag the skb for flushing and return NULL instead.
 */
#define indirect_call_gro_receive(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_2(cb, f2, f1, head, skb);		\
})
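/*
 * Illustrative sketch only (an assumption about typical use): a GRO
 * dispatch path with two likely receive handlers might wrap the indirect
 * call like this, where tcp4_gro_receive/udp4_gro_receive and
 * ops->callbacks.gro_receive stand in for the real callback set.
 *
 *	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
 *				       ops->callbacks.gro_receive, head, skb);
 */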

#endif