// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

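/* Finalize the IPv4 header of a locally generated packet (total length and
 * checksum) and run the NF_INET_LOCAL_OUT netfilter hook. A return value of 1
 * means the caller may continue and hand the skb to dst_output().
 */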
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

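/* Last step of transmission: resolve the neighbour entry for the next hop and
 * hand the skb to it, reallocating headroom first if the device needs more
 * than is available.
 */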
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

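/* Work common to the unicast and multicast output paths once routing is done:
 * reroute through dst_output() if an xfrm policy was applied after SNAT,
 * segment GSO skbs, and fragment anything still larger than the path MTU
 * before handing off to ip_finish_output2().
 */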
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

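/* Run the cgroup BPF egress program; anything other than NET_XMIT_SUCCESS or
 * NET_XMIT_CN drops the packet.
 */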
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

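/* Output path for multicast (and broadcast) packets: loop a clone back to
 * local listeners where required, then send the original through the
 * NF_INET_POST_ROUTING hook.
 */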
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags & RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags & RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

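/* Standard unicast output path: update the output counters, set the outgoing
 * device and protocol, and traverse NF_INET_POST_ROUTING unless netfilter
 * already rerouted the packet.
 */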
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

/* Note: skb->sk can be different from sk, in case of tunnels */
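/* Transmit entry point for connected sockets such as TCP: reuse or look up the
 * cached route, build the IP header around the transport payload and pass the
 * result to ip_local_out().
 */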
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);

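/* Propagate per-packet metadata (priority, mark, dst, netfilter state and
 * other extensions) from the original skb to a freshly built fragment.
 */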
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

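/* Fragment the skb unless the DF bit forbids it; when DF is honoured and the
 * packet does not fit, reply with ICMP_FRAG_NEEDED carrying the MTU and drop
 * the packet.
 */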
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

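/* Initialise fast-path fragmentation over an existing frag_list: the head skb
 * becomes the first fragment and the iterator walks the chained skbs.
 */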
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

static void ip_fraglist_ipcb_prepare(struct sk_buff *skb,
				     struct ip_fraglist_iter *iter)
{
	struct sk_buff *to = iter->frag;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(skb)->flags;

	if (iter->offset == 0)
		ip_options_fragment(to);
}

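/* Turn the next skb on the frag_list into a proper IP fragment: copy in the
 * header, set the fragment offset and MF flag, and recompute the checksum.
 */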
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

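/* Set up the slow-path fragmentation state from the skb that is being split. */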
void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag, struct ip_frag_state *state)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

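/* Build the next slow-path fragment: allocate a new skb, copy a block of
 * payload from the original and fill in its IP header.
 */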
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	len = state->left;
	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left) {
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep MF on each bit
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag) {
				ip_fraglist_ipcb_prepare(skb, &iter);
				ip_fraglist_prepare(skb, &iter);
			}

			skb->tstamp = tstamp;
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag, &state);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb2->tstamp = tstamp;
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

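/* Default getfrag() callback for ip_append_data(): copy user data from the
 * msghdr iterator, computing the checksum on the fly when the device cannot
 * offload it.
 */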
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

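/* Checksum a block of 'copy' bytes of a page starting at 'offset'. */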
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

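/* Core of ip_append_data() and friends: append 'length' bytes obtained via the
 * getfrag() callback to the socket's cork queue, growing the tail skb or
 * allocating new MTU-sized skbs so that the queue can later be flushed as a
 * single datagram or a chain of ready-made fragments.
 */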
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len + 15,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

Herbert Xu1470ddf2011-03-01 02:36:47 +00001239static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1240 struct ipcm_cookie *ipc, struct rtable **rtp)
1241{
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001242 struct ip_options_rcu *opt;
Herbert Xu1470ddf2011-03-01 02:36:47 +00001243 struct rtable *rt;
1244
Gao Feng9783ccd2018-04-16 10:16:45 +08001245 rt = *rtp;
1246 if (unlikely(!rt))
1247 return -EFAULT;
1248
Herbert Xu1470ddf2011-03-01 02:36:47 +00001249 /*
1250 * setup for corking.
1251 */
1252 opt = ipc->opt;
1253 if (opt) {
Ian Morris51456b22015-04-03 09:17:26 +01001254 if (!cork->opt) {
Herbert Xu1470ddf2011-03-01 02:36:47 +00001255 cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1256 sk->sk_allocation);
Ian Morris51456b22015-04-03 09:17:26 +01001257 if (unlikely(!cork->opt))
Herbert Xu1470ddf2011-03-01 02:36:47 +00001258 return -ENOBUFS;
1259 }
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001260 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001261 cork->flags |= IPCORK_OPT;
1262 cork->addr = ipc->addr;
1263 }
Gao Feng9783ccd2018-04-16 10:16:45 +08001264
Hannes Frederic Sowa482fc602013-11-05 02:24:17 +01001265 cork->fragsize = ip_sk_use_pmtu(sk) ?
Eric Dumazet501a90c2019-12-05 20:43:46 -08001266 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1267
1268 if (!inetdev_valid_mtu(cork->fragsize))
1269 return -ENETUNREACH;
Willem de Bruijnbec1f6f2018-04-26 13:42:17 -04001270
Willem de Bruijnfbf47812018-07-06 10:12:59 -04001271 cork->gso_size = ipc->gso_size;
Eric Dumazet501a90c2019-12-05 20:43:46 -08001272
Herbert Xu1470ddf2011-03-01 02:36:47 +00001273 cork->dst = &rt->dst;
Eric Dumazet501a90c2019-12-05 20:43:46 -08001274 /* We stole this route, caller should not release it. */
1275 *rtp = NULL;
1276
Herbert Xu1470ddf2011-03-01 02:36:47 +00001277 cork->length = 0;
Francesco Fuscoaa661582013-09-24 15:43:09 +02001278 cork->ttl = ipc->ttl;
1279 cork->tos = ipc->tos;
Willem de Bruijnc6af0c22019-09-11 15:50:51 -04001280 cork->mark = ipc->sockc.mark;
Francesco Fuscoaa661582013-09-24 15:43:09 +02001281 cork->priority = ipc->priority;
Jesus Sanchez-Palenciabc969a92018-07-03 15:42:49 -07001282 cork->transmit_time = ipc->sockc.transmit_time;
Willem de Bruijn678ca422018-07-06 10:12:58 -04001283 cork->tx_flags = 0;
1284 sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001285
1286 return 0;
1287}
1288
1289/*
1290 * ip_append_data() and ip_append_page() can make one large IP datagram
1291 * from many pieces of data. Each piece will be held on the socket
1292 * until ip_push_pending_frames() is called. Each piece can be a page
1293 * or non-page data.
1294 *
1295 * Not only UDP but other transport protocols - e.g. raw sockets - can
1296 * potentially use this interface as well; a usage sketch follows below.
1297 *
1298 * LATER: length must be adjusted by pad at tail, when it is required.
1299 */
David S. Millerf5fca602011-05-08 17:24:10 -07001300int ip_append_data(struct sock *sk, struct flowi4 *fl4,
Herbert Xu1470ddf2011-03-01 02:36:47 +00001301 int getfrag(void *from, char *to, int offset, int len,
1302 int odd, struct sk_buff *skb),
1303 void *from, int length, int transhdrlen,
1304 struct ipcm_cookie *ipc, struct rtable **rtp,
1305 unsigned int flags)
1306{
1307 struct inet_sock *inet = inet_sk(sk);
1308 int err;
1309
1310 if (flags&MSG_PROBE)
1311 return 0;
1312
1313 if (skb_queue_empty(&sk->sk_write_queue)) {
David S. Millerbdc712b2011-05-06 15:02:07 -07001314 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001315 if (err)
1316 return err;
1317 } else {
1318 transhdrlen = 0;
1319 }
1320
Eric Dumazet5640f762012-09-23 23:04:42 +00001321 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1322 sk_page_frag(sk), getfrag,
Herbert Xu1470ddf2011-03-01 02:36:47 +00001323 from, length, transhdrlen, flags);
1324}
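
/*
 * Illustrative sketch (not part of the original file): one plausible way a
 * datagram-style sender could drive the corking API above, loosely modeled
 * on the raw-socket send path.  The function name, the use of
 * ip_generic_getfrag() and the locking shown here are assumptions for the
 * example, not a definitive caller.
 */
static int example_corked_send(struct sock *sk, struct flowi4 *fl4,
			       struct msghdr *msg, size_t len,
			       struct ipcm_cookie *ipc, struct rtable **rtp)
{
	int err;

	lock_sock(sk);
	/*
	 * Queue the payload on sk_write_queue; ip_generic_getfrag() copies
	 * from msg->msg_iter.  On the first append the route is stolen into
	 * the cork and *rtp is cleared (see ip_setup_cork() above).
	 */
	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
			     0 /* transhdrlen */, ipc, rtp, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);		/* drop what was queued */
	else if (!(msg->msg_flags & MSG_MORE))
		err = ip_push_pending_frames(sk, fl4);	/* build and transmit */
	release_sock(sk);

	return err;
}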
1325
David S. Millerf5fca602011-05-08 17:24:10 -07001326ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 int offset, size_t size, int flags)
1328{
1329 struct inet_sock *inet = inet_sk(sk);
1330 struct sk_buff *skb;
1331 struct rtable *rt;
1332 struct ip_options *opt = NULL;
David S. Millerbdc712b2011-05-06 15:02:07 -07001333 struct inet_cork *cork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 int hh_len;
1335 int mtu;
1336 int len;
1337 int err;
Hannes Frederic Sowadaba2872013-10-27 17:29:11 +01001338 unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
1340 if (inet->hdrincl)
1341 return -EPERM;
1342
1343 if (flags&MSG_PROBE)
1344 return 0;
1345
1346 if (skb_queue_empty(&sk->sk_write_queue))
1347 return -EINVAL;
1348
David S. Millerbdc712b2011-05-06 15:02:07 -07001349 cork = &inet->cork.base;
1350 rt = (struct rtable *)cork->dst;
1351 if (cork->flags & IPCORK_OPT)
1352 opt = cork->opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
Changli Gaod8d1f302010-06-10 23:31:35 -07001354 if (!(rt->dst.dev->features&NETIF_F_SG))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 return -EOPNOTSUPP;
1356
Changli Gaod8d1f302010-06-10 23:31:35 -07001357 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
Willem de Bruijnbec1f6f2018-04-26 13:42:17 -04001358 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
1360 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1361 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
WANG Cong60ff7462014-05-04 16:39:18 -07001362 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
Hannes Frederic Sowadaba2872013-10-27 17:29:11 +01001364 if (cork->length + size > maxnonfragsize - fragheaderlen) {
Hannes Frederic Sowa61e7f092013-12-19 02:13:36 +01001365 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
1366 mtu - (opt ? opt->optlen : 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 return -EMSGSIZE;
1368 }
1369
Ian Morris51456b22015-04-03 09:17:26 +01001370 skb = skb_peek_tail(&sk->sk_write_queue);
1371 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 return -EINVAL;
1373
Hannes Frederic Sowaa8c4a252016-02-22 18:43:25 +01001374 cork->length += size;
Ananda Rajue89e9cf2005-10-18 15:46:41 -07001375
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 while (size > 0) {
Willem de Bruijnab2fb7e2017-08-22 11:39:57 -04001377 /* Check if the remaining data fits into the current packet. */
1378 len = mtu - skb->len;
1379 if (len < size)
1380 len = maxfraglen - skb->len;
Ananda Rajue89e9cf2005-10-18 15:46:41 -07001381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 if (len <= 0) {
1383 struct sk_buff *skb_prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 int alloclen;
1385
1386 skb_prev = skb;
Jayachandran C0d0d2bb2005-10-13 11:43:02 -07001387 fraggap = skb_prev->len - maxfraglen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
1389 alloclen = fragheaderlen + hh_len + fraggap + 15;
1390 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1391 if (unlikely(!skb)) {
1392 err = -ENOBUFS;
1393 goto error;
1394 }
1395
1396 /*
1397 * Fill in the control structures
1398 */
1399 skb->ip_summed = CHECKSUM_NONE;
1400 skb->csum = 0;
1401 skb_reserve(skb, hh_len);
1402
1403 /*
1404 * Find where to start putting bytes.
1405 */
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001406 skb_put(skb, fragheaderlen + fraggap);
Arnaldo Carvalho de Melo2ca9e6f2007-03-10 19:15:25 -03001407 skb_reset_network_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001408 skb->transport_header = (skb->network_header +
1409 fragheaderlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 if (fraggap) {
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001411 skb->csum = skb_copy_and_csum_bits(skb_prev,
1412 maxfraglen,
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001413 skb_transport_header(skb),
Al Viro8d5930d2020-07-10 20:07:10 -04001414 fraggap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 skb_prev->csum = csum_sub(skb_prev->csum,
1416 skb->csum);
Herbert Xue9fa4f72006-08-13 20:12:58 -07001417 pskb_trim_unique(skb_prev, maxfraglen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 }
1419
1420 /*
1421 * Put the packet on the pending queue.
1422 */
1423 __skb_queue_tail(&sk->sk_write_queue, skb);
1424 continue;
1425 }
1426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 if (len > size)
1428 len = size;
Hannes Frederic Sowabe12a1f2015-05-21 16:59:58 +02001429
1430 if (skb_append_pagefrags(skb, page, offset, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 err = -EMSGSIZE;
1432 goto error;
1433 }
1434
1435 if (skb->ip_summed == CHECKSUM_NONE) {
Al Viro44bb9362006-11-14 21:36:14 -08001436 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 csum = csum_page(page, offset, len);
1438 skb->csum = csum_block_add(skb->csum, csum, skb->len);
1439 }
1440
1441 skb->len += len;
1442 skb->data_len += len;
David S. Miller1e34a112008-01-22 23:44:31 -08001443 skb->truesize += len;
Reshetova, Elena14afee42017-06-30 13:08:00 +03001444 refcount_add(len, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 offset += len;
1446 size -= len;
1447 }
1448 return 0;
1449
1450error:
David S. Millerbdc712b2011-05-06 15:02:07 -07001451 cork->length -= size;
Pavel Emelyanov5e38e272008-07-16 20:19:49 -07001452 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 return err;
1454}
1455
Herbert Xu1470ddf2011-03-01 02:36:47 +00001456static void ip_cork_release(struct inet_cork *cork)
Pavel Emelyanov429f08e2007-11-05 21:03:24 -08001457{
Herbert Xu1470ddf2011-03-01 02:36:47 +00001458 cork->flags &= ~IPCORK_OPT;
1459 kfree(cork->opt);
1460 cork->opt = NULL;
1461 dst_release(cork->dst);
1462 cork->dst = NULL;
Pavel Emelyanov429f08e2007-11-05 21:03:24 -08001463}
1464
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465/*
1466 * Combine all pending IP fragments on the socket into one IP datagram
1467 * and push them out.
1468 */
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001469struct sk_buff *__ip_make_skb(struct sock *sk,
David S. Miller77968b72011-05-08 17:12:19 -07001470 struct flowi4 *fl4,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001471 struct sk_buff_head *queue,
1472 struct inet_cork *cork)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473{
1474 struct sk_buff *skb, *tmp_skb;
1475 struct sk_buff **tail_skb;
1476 struct inet_sock *inet = inet_sk(sk);
Pavel Emelyanov0388b002008-07-14 23:00:43 -07001477 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 struct ip_options *opt = NULL;
Herbert Xu1470ddf2011-03-01 02:36:47 +00001479 struct rtable *rt = (struct rtable *)cork->dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 struct iphdr *iph;
Alexey Dobriyan76ab6082006-01-06 13:24:29 -08001481 __be16 df = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 __u8 ttl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Ian Morris51456b22015-04-03 09:17:26 +01001484 skb = __skb_dequeue(queue);
1485 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 goto out;
1487 tail_skb = &(skb_shinfo(skb)->frag_list);
1488
1489 /* move skb->data to ip header from ext header */
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001490 if (skb->data < skb_network_header(skb))
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001491 __skb_pull(skb, skb_network_offset(skb));
Herbert Xu1470ddf2011-03-01 02:36:47 +00001492 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03001493 __skb_pull(tmp_skb, skb_network_header_len(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 *tail_skb = tmp_skb;
1495 tail_skb = &(tmp_skb->next);
1496 skb->len += tmp_skb->len;
1497 skb->data_len += tmp_skb->len;
1498 skb->truesize += tmp_skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 tmp_skb->destructor = NULL;
1500 tmp_skb->sk = NULL;
1501 }
1502
1503 /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
1504 * the frame generated here to be fragmented. No matter how transforms
1505 * change the size of the packet, it will still come out.
1506 */
WANG Cong60ff7462014-05-04 16:39:18 -07001507 skb->ignore_df = ip_sk_ignore_df(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
1509 /* DF bit is set when we want to see DF on outgoing frames.
WANG Cong60ff7462014-05-04 16:39:18 -07001510 * If ignore_df is set too, we still allow this frame to be fragmented
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 * locally. */
Hannes Frederic Sowa482fc602013-11-05 02:24:17 +01001512 if (inet->pmtudisc == IP_PMTUDISC_DO ||
1513 inet->pmtudisc == IP_PMTUDISC_PROBE ||
Changli Gaod8d1f302010-06-10 23:31:35 -07001514 (skb->len <= dst_mtu(&rt->dst) &&
1515 ip_dont_fragment(sk, &rt->dst)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 df = htons(IP_DF);
1517
Herbert Xu1470ddf2011-03-01 02:36:47 +00001518 if (cork->flags & IPCORK_OPT)
1519 opt = cork->opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520
Francesco Fuscoaa661582013-09-24 15:43:09 +02001521 if (cork->ttl != 0)
1522 ttl = cork->ttl;
1523 else if (rt->rt_type == RTN_MULTICAST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 ttl = inet->mc_ttl;
1525 else
Changli Gaod8d1f302010-06-10 23:31:35 -07001526 ttl = ip_select_ttl(inet, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
Ansis Atteka749154a2013-09-18 15:29:52 -07001528 iph = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 iph->version = 4;
1530 iph->ihl = 5;
Francesco Fuscoaa661582013-09-24 15:43:09 +02001531 iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 iph->frag_off = df;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 iph->ttl = ttl;
1534 iph->protocol = sk->sk_protocol;
Eric Dumazet84f93072011-11-30 19:00:53 +00001535 ip_copy_addrs(iph, fl4);
Hannes Frederic Sowab6a77192015-03-25 17:07:44 +01001536 ip_select_ident(net, skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
David S. Miller22f728f2011-05-13 17:21:27 -04001538 if (opt) {
1539 iph->ihl += opt->optlen>>2;
1540 ip_options_build(skb, opt, cork->addr, rt, 0);
1541 }
1542
Francesco Fuscoaa661582013-09-24 15:43:09 +02001543 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
Willem de Bruijnc6af0c22019-09-11 15:50:51 -04001544 skb->mark = cork->mark;
Jesus Sanchez-Palenciabc969a92018-07-03 15:42:49 -07001545 skb->tstamp = cork->transmit_time;
Eric Dumazeta21bba92008-11-24 16:07:50 -08001546 /*
1547 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1548 * on dst refcount
1549 */
Herbert Xu1470ddf2011-03-01 02:36:47 +00001550 cork->dst = NULL;
Changli Gaod8d1f302010-06-10 23:31:35 -07001551 skb_dst_set(skb, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
David L Stevens96793b42007-09-17 09:57:33 -07001553 if (iph->protocol == IPPROTO_ICMP)
Pavel Emelyanov0388b002008-07-14 23:00:43 -07001554 icmp_out_count(net, ((struct icmphdr *)
David L Stevens96793b42007-09-17 09:57:33 -07001555 skb_transport_header(skb))->type);
1556
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001557 ip_cork_release(cork);
1558out:
1559 return skb;
1560}
1561
Eric Dumazetb5ec8ee2012-08-10 02:22:47 +00001562int ip_send_skb(struct net *net, struct sk_buff *skb)
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001563{
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001564 int err;
1565
Eric W. Biederman33224b12015-10-07 16:48:46 -05001566 err = ip_local_out(net, skb->sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 if (err) {
1568 if (err > 0)
Eric Dumazet6ce9e7b2009-09-02 18:05:33 -07001569 err = net_xmit_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 if (err)
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001571 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 }
1573
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
1576
David S. Miller77968b72011-05-08 17:12:19 -07001577int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
Herbert Xu1470ddf2011-03-01 02:36:47 +00001578{
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001579 struct sk_buff *skb;
1580
David S. Miller77968b72011-05-08 17:12:19 -07001581 skb = ip_finish_skb(sk, fl4);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001582 if (!skb)
1583 return 0;
1584
1585 /* Netfilter gets the whole, unfragmented skb. */
Eric Dumazetb5ec8ee2012-08-10 02:22:47 +00001586 return ip_send_skb(sock_net(sk), skb);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001587}
1588
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589/*
1590 * Throw away all pending data on the socket.
1591 */
Herbert Xu1470ddf2011-03-01 02:36:47 +00001592static void __ip_flush_pending_frames(struct sock *sk,
1593 struct sk_buff_head *queue,
1594 struct inet_cork *cork)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 struct sk_buff *skb;
1597
Herbert Xu1470ddf2011-03-01 02:36:47 +00001598 while ((skb = __skb_dequeue_tail(queue)) != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 kfree_skb(skb);
1600
Herbert Xu1470ddf2011-03-01 02:36:47 +00001601 ip_cork_release(cork);
1602}
1603
1604void ip_flush_pending_frames(struct sock *sk)
1605{
David S. Millerbdc712b2011-05-06 15:02:07 -07001606 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607}
1608
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001609struct sk_buff *ip_make_skb(struct sock *sk,
David S. Miller77968b72011-05-08 17:12:19 -07001610 struct flowi4 *fl4,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001611 int getfrag(void *from, char *to, int offset,
1612 int len, int odd, struct sk_buff *skb),
1613 void *from, int length, int transhdrlen,
1614 struct ipcm_cookie *ipc, struct rtable **rtp,
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001615 struct inet_cork *cork, unsigned int flags)
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001616{
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001617 struct sk_buff_head queue;
1618 int err;
1619
1620 if (flags & MSG_PROBE)
1621 return NULL;
1622
1623 __skb_queue_head_init(&queue);
1624
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001625 cork->flags = 0;
1626 cork->addr = 0;
1627 cork->opt = NULL;
1628 err = ip_setup_cork(sk, cork, ipc, rtp);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001629 if (err)
1630 return ERR_PTR(err);
1631
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001632 err = __ip_append_data(sk, fl4, &queue, cork,
Eric Dumazet5640f762012-09-23 23:04:42 +00001633 &current->task_frag, getfrag,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001634 from, length, transhdrlen, flags);
1635 if (err) {
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001636 __ip_flush_pending_frames(sk, &queue, cork);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001637 return ERR_PTR(err);
1638 }
1639
Willem de Bruijn1cd78842018-04-26 13:42:15 -04001640 return __ip_make_skb(sk, fl4, &queue, cork);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001641}
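
/*
 * Illustrative sketch (not part of the original file): the single-datagram
 * path that ip_make_skb() enables.  Because the cork lives on the caller's
 * stack, nothing is left pending on the socket between calls; this mirrors
 * how uncorked UDP sends work, but the function name and the zero
 * transport-header length are assumptions for the example.
 */
static int example_uncorked_send(struct sock *sk, struct flowi4 *fl4,
				 struct msghdr *msg, size_t len,
				 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_cork cork;
	struct sk_buff *skb;

	skb = ip_make_skb(sk, fl4, ip_generic_getfrag, msg, len,
			  0 /* transhdrlen */, ipc, rtp, &cork, msg->msg_flags);
	if (IS_ERR_OR_NULL(skb))
		return PTR_ERR_OR_ZERO(skb);	/* NULL only for MSG_PROBE */

	/* The datagram is fully assembled; hand it straight to the stack. */
	return ip_send_skb(sock_net(sk), skb);
}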
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
1643/*
1644 * Fetch data from kernel space and fill in checksum if needed.
1645 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001646static int ip_reply_glue_bits(void *dptr, char *to, int offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 int len, int odd, struct sk_buff *skb)
1648{
Al Viro50842052006-11-14 21:36:34 -08001649 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
Al Virocc44c172020-07-11 00:12:07 -04001651 csum = csum_partial_copy_nocheck(dptr+offset, to, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 skb->csum = csum_block_add(skb->csum, csum, odd);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001653 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654}
1655
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001656/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 * Generic function to send a packet as a reply to another packet.
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001658 * So far it is used only to send TCP resets and acks.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 */
Eric Dumazetbdbbb852015-01-29 21:35:05 -08001660void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
Eric Dumazet24a2d432014-09-27 09:50:55 -07001661 const struct ip_options *sopt,
1662 __be32 daddr, __be32 saddr,
1663 const struct ip_reply_arg *arg,
Eric Dumazetd6fb3962019-06-13 21:22:35 -07001664 unsigned int len, u64 transmit_time)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001666 struct ip_options_data replyopts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 struct ipcm_cookie ipc;
David S. Miller77968b72011-05-08 17:12:19 -07001668 struct flowi4 fl4;
Eric Dumazet511c3f92009-06-02 05:14:27 +00001669 struct rtable *rt = skb_rtable(skb);
Eric Dumazetbdbbb852015-01-29 21:35:05 -08001670 struct net *net = sock_net(sk);
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001671 struct sk_buff *nskb;
Vasily Averin40620902014-10-15 16:24:02 +04001672 int err;
David Ahernf7ba8682015-08-13 14:59:08 -06001673 int oif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Paolo Abeni91ed1e62017-08-03 18:07:06 +02001675 if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 return;
1677
Willem de Bruijn35178202018-07-06 10:12:54 -04001678 ipcm_init(&ipc);
David S. Miller0a5ebb82011-05-09 13:22:43 -07001679 ipc.addr = daddr;
Eric Dumazetd6fb3962019-06-13 21:22:35 -07001680 ipc.sockc.transmit_time = transmit_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001682 if (replyopts.opt.opt.optlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 ipc.opt = &replyopts.opt;
1684
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001685 if (replyopts.opt.opt.srr)
1686 daddr = replyopts.opt.opt.faddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 }
1688
David Ahernf7ba8682015-08-13 14:59:08 -06001689 oif = arg->bound_dev_if;
David Ahern9b6c14d2016-11-09 09:07:26 -08001690 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1691 oif = skb->skb_iif;
David Ahernf7ba8682015-08-13 14:59:08 -06001692
1693 flowi4_init_output(&fl4, oif,
Jon Maxwell00483692018-05-10 16:53:51 +10001694 IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
Eric Dumazet66b13d92011-10-24 03:06:21 -04001695 RT_TOS(arg->tos),
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001696 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
David S. Miller77968b72011-05-08 17:12:19 -07001697 ip_reply_arg_flowi_flags(arg),
David S. Miller70e73412012-06-28 03:21:41 -07001698 daddr, saddr,
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09001699 tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1700 arg->uid);
David S. Miller77968b72011-05-08 17:12:19 -07001701 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001702 rt = ip_route_output_key(net, &fl4);
David S. Miller77968b72011-05-08 17:12:19 -07001703 if (IS_ERR(rt))
1704 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705
Eric Dumazetbdbbb852015-01-29 21:35:05 -08001706 inet_sk(sk)->tos = arg->tos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001708 sk->sk_protocol = ip_hdr(skb)->protocol;
Patrick McHardyf0e48db2007-06-04 21:32:46 -07001709 sk->sk_bound_dev_if = arg->bound_dev_if;
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001710 sk->sk_sndbuf = sysctl_wmem_default;
Willem de Bruijn0da75362020-07-01 16:00:06 -04001711 ipc.sockc.mark = fl4.flowi4_mark;
Vasily Averin40620902014-10-15 16:24:02 +04001712 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1713 len, 0, &ipc, &rt, MSG_DONTWAIT);
1714 if (unlikely(err)) {
1715 ip_flush_pending_frames(sk);
1716 goto out;
1717 }
1718
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001719 nskb = skb_peek(&sk->sk_write_queue);
1720 if (nskb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 if (arg->csumoffset >= 0)
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001722 *((__sum16 *)skb_transport_header(nskb) +
1723 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001724 arg->csum));
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001725 nskb->ip_summed = CHECKSUM_NONE;
David S. Miller77968b72011-05-08 17:12:19 -07001726 ip_push_pending_frames(sk, &fl4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 }
Vasily Averin40620902014-10-15 16:24:02 +04001728out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 ip_rt_put(rt);
1730}
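
/*
 * Illustrative sketch (not part of the original file): filling in a
 * struct ip_reply_arg for ip_send_unicast_reply(), roughly the way TCP
 * replies with resets/acks.  The payload, the choice of echoing the IPCB
 * option block and binding to the arrival interface are assumptions for
 * the example; ctl_sk is expected to be a dedicated kernel control socket,
 * since the function above rewrites several of its fields.
 */
static void example_send_reply(struct sock *ctl_sk, struct sk_buff *in_skb,
			       void *payload, unsigned int len)
{
	struct ip_reply_arg arg = { };

	arg.iov[0].iov_base = payload;
	arg.iov[0].iov_len  = len;
	arg.csumoffset	    = -1;		/* no checksum field to patch */
	arg.bound_dev_if    = in_skb->skb_iif;	/* reply via the arrival device */
	arg.tos		    = ip_hdr(in_skb)->tos;
	arg.uid		    = sock_net_uid(sock_net(ctl_sk), NULL);

	/* daddr/saddr are swapped: the reply goes back to the sender. */
	ip_send_unicast_reply(ctl_sk, in_skb, &IPCB(in_skb)->opt,
			      ip_hdr(in_skb)->saddr, ip_hdr(in_skb)->daddr,
			      &arg, len, 0 /* transmit_time */);
}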
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732void __init ip_init(void)
1733{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 ip_rt_init();
1735 inet_initpeers();
1736
WANG Cong72c1d3b2014-01-10 16:09:45 -08001737#if defined(CONFIG_IP_MULTICAST)
1738 igmp_mc_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739#endif
1740}