/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

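/*
 * Finish the IP header (total length and checksum) and run the packet
 * through the netfilter LOCAL_OUT hook.  Returns 1 when the hook accepts
 * the packet; ip_local_out() below then continues with dst_output().
 */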
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

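/* Use the per-socket unicast TTL when one is set (uc_ttl >= 0),
 * otherwise fall back to the route's default hop limit.
 */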
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

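/*
 * Last step of output: resolve the next-hop neighbour (creating an ARP
 * entry if necessary) and hand the packet to the device, expanding the
 * headroom first when the link-layer header does not fit.
 */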
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		res = neigh_output(neigh, skb);

		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

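/* Segment a GSO packet in software when its segments would not fit the
 * egress MTU, then fragment each resulting segment as needed.
 */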
static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: seglen <= mtu */
	if (skb_gso_validate_mtu(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}

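/* Runs after POST_ROUTING: apply the cgroup egress BPF program, restart
 * output for packets an XFRM policy rerouted, and segment or fragment
 * anything larger than the destination MTU before the final transmit.
 */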
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

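/* Loopback copy of a multicast/broadcast frame: still subject to the
 * cgroup egress BPF program before being looped back to local listeners.
 */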
static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return dev_loopback_xmit(net, sk, skb);
}

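/*
 * Output path for multicast (and broadcast) packets: clone and loop back
 * a copy to local listeners when required, then send the original via
 * the POST_ROUTING hook as usual.
 */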
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that were returned after forwarding; they will be
		   dropped by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

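/*
 * Standard unicast output: account the packet, then pass it through the
 * POST_ROUTING hook to ip_finish_output(); the hook is skipped for
 * packets already flagged IPSKB_REROUTED.
 */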
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

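/*
 * Main transmit entry point for connected sockets (e.g. TCP): find or
 * refresh the cached route, build the IP header around the transport
 * payload and hand the result to ip_local_out().
 */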
/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);

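/* Propagate per-packet metadata (packet type, priority, dst, mark,
 * netfilter and security state) from the original skb to a fragment.
 */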
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

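/* Either fragment the skb via ip_do_fragment() or, when the DF bit is
 * set and fragmentation is not allowed, reply with ICMP FRAG_NEEDED
 * (for path MTU discovery) and drop the packet.
 */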
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}

		/* Allocate buffer */
		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
		if (!skb2) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
			iph->frag_off |= htons(IP_DF);

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep the MF bit set on each piece
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

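/* Generic getfrag callback for ip_append_data(): copy user data from a
 * msghdr iterator into the skb, computing the checksum on the fly when
 * the hardware will not do it (ip_summed != CHECKSUM_PARTIAL).
 */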
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

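/* Checksum 'copy' bytes of a (possibly highmem) page starting at
 * 'offset', mapping the page temporarily with kmap().
 */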
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->csum = 0;

		if (flags & MSG_CONFIRM)
			skb_set_dst_pending_confirm(skb, 1);

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* specify the length of each IP datagram fragment */
	skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

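/*
 * Core worker for ip_append_data(): append 'length' bytes obtained via
 * getfrag() to the pending queue, growing the tail skb or allocating
 * new ones so that every queued skb is an MTU-sized, ready-to-send
 * piece of the final datagram.
 */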
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;
	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    !(flags & MSG_MORE) &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate chained skbs;
	 * each segment is an IP fragment ready for sending to the network
	 * once an appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

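/* Initialise the per-socket cork state from the cookie: duplicate any
 * IP options, steal the caller's route reference and record fragment
 * size, TTL, TOS and priority for the pending datagram.
 */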
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal the reference to this route; the caller should not
	 * release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->tx_flags = ipc->tx_flags;

	return 0;
}

1203/*
1204 * ip_append_data() and ip_append_page() can make one large IP datagram
1205 * from many pieces of data. Each pieces will be holded on the socket
1206 * until ip_push_pending_frames() is called. Each piece can be a page
1207 * or non-page data.
1208 *
1209 * Not only UDP, other transport protocols - e.g. raw sockets - can use
1210 * this interface potentially.
1211 *
1212 * LATER: length must be adjusted by pad at tail, when it is required.
1213 */
David S. Millerf5fca602011-05-08 17:24:10 -07001214int ip_append_data(struct sock *sk, struct flowi4 *fl4,
Herbert Xu1470ddf2011-03-01 02:36:47 +00001215 int getfrag(void *from, char *to, int offset, int len,
1216 int odd, struct sk_buff *skb),
1217 void *from, int length, int transhdrlen,
1218 struct ipcm_cookie *ipc, struct rtable **rtp,
1219 unsigned int flags)
1220{
1221 struct inet_sock *inet = inet_sk(sk);
1222 int err;
1223
1224 if (flags&MSG_PROBE)
1225 return 0;
1226
1227 if (skb_queue_empty(&sk->sk_write_queue)) {
David S. Millerbdc712b2011-05-06 15:02:07 -07001228 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001229 if (err)
1230 return err;
1231 } else {
1232 transhdrlen = 0;
1233 }
1234
Eric Dumazet5640f762012-09-23 23:04:42 +00001235 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1236 sk_page_frag(sk), getfrag,
Herbert Xu1470ddf2011-03-01 02:36:47 +00001237 from, length, transhdrlen, flags);
1238}
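
/*
 * A minimal usage sketch (hypothetical caller, loosely modeled on the
 * UDP send path; ip_generic_getfrag() is the getfrag implementation
 * UDP and raw sockets share, but any callback matching the getfrag
 * signature will do):
 *
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg,
 *			     len, sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 *
 * With MSG_MORE set, the data simply stays queued on the socket and
 * later calls keep appending to the same pending datagram.
 */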
1239
David S. Millerf5fca602011-05-08 17:24:10 -07001240ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 int offset, size_t size, int flags)
1242{
1243 struct inet_sock *inet = inet_sk(sk);
1244 struct sk_buff *skb;
1245 struct rtable *rt;
1246 struct ip_options *opt = NULL;
David S. Millerbdc712b2011-05-06 15:02:07 -07001247 struct inet_cork *cork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 int hh_len;
1249 int mtu;
1250 int len;
1251 int err;
Hannes Frederic Sowadaba2872013-10-27 17:29:11 +01001252 unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253
1254 if (inet->hdrincl)
1255 return -EPERM;
1256
1257 if (flags&MSG_PROBE)
1258 return 0;
1259
1260 if (skb_queue_empty(&sk->sk_write_queue))
1261 return -EINVAL;
1262
David S. Millerbdc712b2011-05-06 15:02:07 -07001263 cork = &inet->cork.base;
1264 rt = (struct rtable *)cork->dst;
1265 if (cork->flags & IPCORK_OPT)
1266 opt = cork->opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267
Changli Gaod8d1f302010-06-10 23:31:35 -07001268 if (!(rt->dst.dev->features&NETIF_F_SG))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 return -EOPNOTSUPP;
1270
Changli Gaod8d1f302010-06-10 23:31:35 -07001271 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
David S. Millerbdc712b2011-05-06 15:02:07 -07001272 mtu = cork->fragsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273
1274 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
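	/* Fragment offsets are carried in 8-byte units, so the payload of
	 * every non-final fragment must be a multiple of 8: round the
	 * usable space per fragment down to an 8-byte boundary.
	 */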
1275 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
WANG Cong60ff7462014-05-04 16:39:18 -07001276 maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277
Hannes Frederic Sowadaba2872013-10-27 17:29:11 +01001278 if (cork->length + size > maxnonfragsize - fragheaderlen) {
Hannes Frederic Sowa61e7f092013-12-19 02:13:36 +01001279 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
1280 mtu - (opt ? opt->optlen : 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 return -EMSGSIZE;
1282 }
1283
Ian Morris51456b22015-04-03 09:17:26 +01001284 skb = skb_peek_tail(&sk->sk_write_queue);
1285 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 return -EINVAL;
1287
Herbert Xu26cde9f2010-06-15 01:52:25 +00001288 if ((size + skb->len > mtu) &&
1289 (sk->sk_protocol == IPPROTO_UDP) &&
Changli Gaod8d1f302010-06-10 23:31:35 -07001290 (rt->dst.dev->features & NETIF_F_UFO)) {
Hannes Frederic Sowaa8c4a252016-02-22 18:43:25 +01001291 if (skb->ip_summed != CHECKSUM_PARTIAL)
1292 return -EOPNOTSUPP;
1293
Herbert Xu79671682006-06-22 02:40:14 -07001294 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
Herbert Xuf83ef8c2006-06-30 13:37:03 -07001295 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
Herbert Xu79671682006-06-22 02:40:14 -07001296 }
Hannes Frederic Sowaa8c4a252016-02-22 18:43:25 +01001297 cork->length += size;
Ananda Rajue89e9cf2005-10-18 15:46:41 -07001298
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 while (size > 0) {
Hannes Frederic Sowabe12a1f2015-05-21 16:59:58 +02001300 if (skb_is_gso(skb)) {
Ananda Rajue89e9cf2005-10-18 15:46:41 -07001301 len = size;
Hannes Frederic Sowabe12a1f2015-05-21 16:59:58 +02001302 } else {
Ananda Rajue89e9cf2005-10-18 15:46:41 -07001303
1304			/* Check if the remaining data fits into the current packet. */
1305 len = mtu - skb->len;
1306 if (len < size)
1307 len = maxfraglen - skb->len;
1308 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 if (len <= 0) {
1310 struct sk_buff *skb_prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 int alloclen;
1312
1313 skb_prev = skb;
Jayachandran C0d0d2bb2005-10-13 11:43:02 -07001314 fraggap = skb_prev->len - maxfraglen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
1316 alloclen = fragheaderlen + hh_len + fraggap + 15;
1317 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1318 if (unlikely(!skb)) {
1319 err = -ENOBUFS;
1320 goto error;
1321 }
1322
1323 /*
1324 * Fill in the control structures
1325 */
1326 skb->ip_summed = CHECKSUM_NONE;
1327 skb->csum = 0;
1328 skb_reserve(skb, hh_len);
1329
1330 /*
1331 * Find where to start putting bytes.
1332 */
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001333 skb_put(skb, fragheaderlen + fraggap);
Arnaldo Carvalho de Melo2ca9e6f2007-03-10 19:15:25 -03001334 skb_reset_network_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001335 skb->transport_header = (skb->network_header +
1336 fragheaderlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 if (fraggap) {
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001338 skb->csum = skb_copy_and_csum_bits(skb_prev,
1339 maxfraglen,
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001340 skb_transport_header(skb),
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001341 fraggap, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 skb_prev->csum = csum_sub(skb_prev->csum,
1343 skb->csum);
Herbert Xue9fa4f72006-08-13 20:12:58 -07001344 pskb_trim_unique(skb_prev, maxfraglen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 }
1346
1347 /*
1348 * Put the packet on the pending queue.
1349 */
1350 __skb_queue_tail(&sk->sk_write_queue, skb);
1351 continue;
1352 }
1353
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 if (len > size)
1355 len = size;
Hannes Frederic Sowabe12a1f2015-05-21 16:59:58 +02001356
1357 if (skb_append_pagefrags(skb, page, offset, len)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 err = -EMSGSIZE;
1359 goto error;
1360 }
1361
1362 if (skb->ip_summed == CHECKSUM_NONE) {
Al Viro44bb9362006-11-14 21:36:14 -08001363 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 csum = csum_page(page, offset, len);
1365 skb->csum = csum_block_add(skb->csum, csum, skb->len);
1366 }
1367
1368 skb->len += len;
1369 skb->data_len += len;
David S. Miller1e34a112008-01-22 23:44:31 -08001370 skb->truesize += len;
1371 atomic_add(len, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 offset += len;
1373 size -= len;
1374 }
1375 return 0;
1376
1377error:
David S. Millerbdc712b2011-05-06 15:02:07 -07001378 cork->length -= size;
Pavel Emelyanov5e38e272008-07-16 20:19:49 -07001379 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 return err;
1381}
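
/*
 * Hedged sketch of a sendpage-style caller (hypothetical; note that the
 * skb_queue_empty() check above means corking must already have been
 * started, e.g. by a preceding ip_append_data() call with MSG_MORE):
 *
 *	ret = ip_append_page(sk, &fl4, page, offset, size, flags);
 *	if (ret < 0) {
 *		ip_flush_pending_frames(sk);
 *		return ret;
 *	}
 *	if (!(flags & MSG_MORE))
 *		ret = ip_push_pending_frames(sk, &fl4);
 */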
1382
Herbert Xu1470ddf2011-03-01 02:36:47 +00001383static void ip_cork_release(struct inet_cork *cork)
Pavel Emelyanov429f08e2007-11-05 21:03:24 -08001384{
Herbert Xu1470ddf2011-03-01 02:36:47 +00001385 cork->flags &= ~IPCORK_OPT;
1386 kfree(cork->opt);
1387 cork->opt = NULL;
1388 dst_release(cork->dst);
1389 cork->dst = NULL;
Pavel Emelyanov429f08e2007-11-05 21:03:24 -08001390}
1391
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392/*
1393 * Combined all pending IP fragments on the socket as one IP datagram
1394 * and push them out.
1395 */
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001396struct sk_buff *__ip_make_skb(struct sock *sk,
David S. Miller77968b72011-05-08 17:12:19 -07001397 struct flowi4 *fl4,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001398 struct sk_buff_head *queue,
1399 struct inet_cork *cork)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400{
1401 struct sk_buff *skb, *tmp_skb;
1402 struct sk_buff **tail_skb;
1403 struct inet_sock *inet = inet_sk(sk);
Pavel Emelyanov0388b002008-07-14 23:00:43 -07001404 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 struct ip_options *opt = NULL;
Herbert Xu1470ddf2011-03-01 02:36:47 +00001406 struct rtable *rt = (struct rtable *)cork->dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 struct iphdr *iph;
Alexey Dobriyan76ab6082006-01-06 13:24:29 -08001408 __be16 df = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 __u8 ttl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Ian Morris51456b22015-04-03 09:17:26 +01001411 skb = __skb_dequeue(queue);
1412 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 goto out;
1414 tail_skb = &(skb_shinfo(skb)->frag_list);
1415
1416 /* move skb->data to ip header from ext header */
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001417 if (skb->data < skb_network_header(skb))
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001418 __skb_pull(skb, skb_network_offset(skb));
Herbert Xu1470ddf2011-03-01 02:36:47 +00001419 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
Arnaldo Carvalho de Melocfe1fc72007-03-16 17:26:39 -03001420 __skb_pull(tmp_skb, skb_network_header_len(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 *tail_skb = tmp_skb;
1422 tail_skb = &(tmp_skb->next);
1423 skb->len += tmp_skb->len;
1424 skb->data_len += tmp_skb->len;
1425 skb->truesize += tmp_skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 tmp_skb->destructor = NULL;
1427 tmp_skb->sk = NULL;
1428 }
1429
1430	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
1431	 * fragmenting the frame generated here. No matter how transforms
1432	 * change the size of the packet, it will come out.
1433	 */
1433 */
WANG Cong60ff7462014-05-04 16:39:18 -07001434 skb->ignore_df = ip_sk_ignore_df(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
1436 /* DF bit is set when we want to see DF on outgoing frames.
WANG Cong60ff7462014-05-04 16:39:18 -07001437	 * If ignore_df is set too, we still allow fragmenting this frame
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 * locally. */
Hannes Frederic Sowa482fc602013-11-05 02:24:17 +01001439 if (inet->pmtudisc == IP_PMTUDISC_DO ||
1440 inet->pmtudisc == IP_PMTUDISC_PROBE ||
Changli Gaod8d1f302010-06-10 23:31:35 -07001441 (skb->len <= dst_mtu(&rt->dst) &&
1442 ip_dont_fragment(sk, &rt->dst)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 df = htons(IP_DF);
1444
Herbert Xu1470ddf2011-03-01 02:36:47 +00001445 if (cork->flags & IPCORK_OPT)
1446 opt = cork->opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
Francesco Fuscoaa661582013-09-24 15:43:09 +02001448 if (cork->ttl != 0)
1449 ttl = cork->ttl;
1450 else if (rt->rt_type == RTN_MULTICAST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 ttl = inet->mc_ttl;
1452 else
Changli Gaod8d1f302010-06-10 23:31:35 -07001453 ttl = ip_select_ttl(inet, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
Ansis Atteka749154a2013-09-18 15:29:52 -07001455 iph = ip_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 iph->version = 4;
1457 iph->ihl = 5;
Francesco Fuscoaa661582013-09-24 15:43:09 +02001458 iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 iph->frag_off = df;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 iph->ttl = ttl;
1461 iph->protocol = sk->sk_protocol;
Eric Dumazet84f93072011-11-30 19:00:53 +00001462 ip_copy_addrs(iph, fl4);
Hannes Frederic Sowab6a77192015-03-25 17:07:44 +01001463 ip_select_ident(net, skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464
David S. Miller22f728f2011-05-13 17:21:27 -04001465 if (opt) {
1466 iph->ihl += opt->optlen>>2;
1467 ip_options_build(skb, opt, cork->addr, rt, 0);
1468 }
1469
Francesco Fuscoaa661582013-09-24 15:43:09 +02001470 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -08001471 skb->mark = sk->sk_mark;
Eric Dumazeta21bba92008-11-24 16:07:50 -08001472 /*
1473 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1474 * on dst refcount
1475 */
Herbert Xu1470ddf2011-03-01 02:36:47 +00001476 cork->dst = NULL;
Changli Gaod8d1f302010-06-10 23:31:35 -07001477 skb_dst_set(skb, &rt->dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
David L Stevens96793b42007-09-17 09:57:33 -07001479 if (iph->protocol == IPPROTO_ICMP)
Pavel Emelyanov0388b002008-07-14 23:00:43 -07001480 icmp_out_count(net, ((struct icmphdr *)
David L Stevens96793b42007-09-17 09:57:33 -07001481 skb_transport_header(skb))->type);
1482
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001483 ip_cork_release(cork);
1484out:
1485 return skb;
1486}
1487
Eric Dumazetb5ec8ee2012-08-10 02:22:47 +00001488int ip_send_skb(struct net *net, struct sk_buff *skb)
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001489{
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001490 int err;
1491
Eric W. Biederman33224b12015-10-07 16:48:46 -05001492 err = ip_local_out(net, skb->sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 if (err) {
1494 if (err > 0)
Eric Dumazet6ce9e7b2009-09-02 18:05:33 -07001495 err = net_xmit_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 if (err)
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001497 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 }
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501}
1502
David S. Miller77968b72011-05-08 17:12:19 -07001503int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
Herbert Xu1470ddf2011-03-01 02:36:47 +00001504{
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001505 struct sk_buff *skb;
1506
David S. Miller77968b72011-05-08 17:12:19 -07001507 skb = ip_finish_skb(sk, fl4);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001508 if (!skb)
1509 return 0;
1510
1511	/* Netfilter gets the whole, unfragmented skb. */
Eric Dumazetb5ec8ee2012-08-10 02:22:47 +00001512 return ip_send_skb(sock_net(sk), skb);
Herbert Xu1470ddf2011-03-01 02:36:47 +00001513}
1514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515/*
1516 * Throw away all pending data on the socket.
1517 */
Herbert Xu1470ddf2011-03-01 02:36:47 +00001518static void __ip_flush_pending_frames(struct sock *sk,
1519 struct sk_buff_head *queue,
1520 struct inet_cork *cork)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 struct sk_buff *skb;
1523
Herbert Xu1470ddf2011-03-01 02:36:47 +00001524 while ((skb = __skb_dequeue_tail(queue)) != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 kfree_skb(skb);
1526
Herbert Xu1470ddf2011-03-01 02:36:47 +00001527 ip_cork_release(cork);
1528}
1529
1530void ip_flush_pending_frames(struct sock *sk)
1531{
David S. Millerbdc712b2011-05-06 15:02:07 -07001532 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533}
1534
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001535struct sk_buff *ip_make_skb(struct sock *sk,
David S. Miller77968b72011-05-08 17:12:19 -07001536 struct flowi4 *fl4,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001537 int getfrag(void *from, char *to, int offset,
1538 int len, int odd, struct sk_buff *skb),
1539 void *from, int length, int transhdrlen,
1540 struct ipcm_cookie *ipc, struct rtable **rtp,
1541 unsigned int flags)
1542{
David S. Millerb80d7222011-05-06 15:06:01 -07001543 struct inet_cork cork;
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001544 struct sk_buff_head queue;
1545 int err;
1546
1547 if (flags & MSG_PROBE)
1548 return NULL;
1549
1550 __skb_queue_head_init(&queue);
1551
David S. Millerb80d7222011-05-06 15:06:01 -07001552 cork.flags = 0;
1553 cork.addr = 0;
David S. Miller70652722011-05-06 16:01:15 -07001554 cork.opt = NULL;
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001555 err = ip_setup_cork(sk, &cork, ipc, rtp);
1556 if (err)
1557 return ERR_PTR(err);
1558
Eric Dumazet5640f762012-09-23 23:04:42 +00001559 err = __ip_append_data(sk, fl4, &queue, &cork,
1560 &current->task_frag, getfrag,
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001561 from, length, transhdrlen, flags);
1562 if (err) {
1563 __ip_flush_pending_frames(sk, &queue, &cork);
1564 return ERR_PTR(err);
1565 }
1566
David S. Miller77968b72011-05-08 17:12:19 -07001567 return __ip_make_skb(sk, fl4, &queue, &cork);
Herbert Xu1c32c5a2011-03-01 02:36:47 +00001568}
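
/*
 * Sketch of the uncorked fast path (hypothetical caller): the whole
 * datagram is built in one pass on a private queue, never touching
 * sk->sk_write_queue, and is handed straight to ip_send_skb():
 *
 *	skb = ip_make_skb(sk, &fl4, ip_generic_getfrag, msg, len,
 *			  sizeof(struct udphdr), &ipc, &rt,
 *			  msg->msg_flags);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = ip_send_skb(sock_net(sk), skb);
 *
 * This is roughly what UDP does for non-corked sends (UDP still fills
 * in its transport header before passing the skb on); it avoids the
 * socket write queue and the locking that comes with it.
 */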
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
1570/*
1571 * Fetch data from kernel space and fill in checksum if needed.
1572 */
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001573static int ip_reply_glue_bits(void *dptr, char *to, int offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 int len, int odd, struct sk_buff *skb)
1575{
Al Viro50842052006-11-14 21:36:34 -08001576 __wsum csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
1578 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1579 skb->csum = csum_block_add(skb->csum, csum, odd);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001580 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581}
1582
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001583/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 *	Generic function to send a packet as a reply to another packet.
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001585 * Used to send some TCP resets/acks so far.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 */
Eric Dumazetbdbbb852015-01-29 21:35:05 -08001587void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
Eric Dumazet24a2d432014-09-27 09:50:55 -07001588 const struct ip_options *sopt,
1589 __be32 daddr, __be32 saddr,
1590 const struct ip_reply_arg *arg,
David S. Miller70e73412012-06-28 03:21:41 -07001591 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592{
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001593 struct ip_options_data replyopts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 struct ipcm_cookie ipc;
David S. Miller77968b72011-05-08 17:12:19 -07001595 struct flowi4 fl4;
Eric Dumazet511c3f92009-06-02 05:14:27 +00001596 struct rtable *rt = skb_rtable(skb);
Eric Dumazetbdbbb852015-01-29 21:35:05 -08001597 struct net *net = sock_net(sk);
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001598 struct sk_buff *nskb;
Vasily Averin40620902014-10-15 16:24:02 +04001599 int err;
David Ahernf7ba8682015-08-13 14:59:08 -06001600 int oif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601
Eric Dumazet24a2d432014-09-27 09:50:55 -07001602 if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 return;
1604
David S. Miller0a5ebb82011-05-09 13:22:43 -07001605 ipc.addr = daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 ipc.opt = NULL;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001607 ipc.tx_flags = 0;
Francesco Fuscoaa661582013-09-24 15:43:09 +02001608 ipc.ttl = 0;
1609 ipc.tos = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001611 if (replyopts.opt.opt.optlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 ipc.opt = &replyopts.opt;
1613
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001614 if (replyopts.opt.opt.srr)
1615 daddr = replyopts.opt.opt.faddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 }
1617
David Ahernf7ba8682015-08-13 14:59:08 -06001618 oif = arg->bound_dev_if;
David Ahern9b6c14d2016-11-09 09:07:26 -08001619 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1620 oif = skb->skb_iif;
David Ahernf7ba8682015-08-13 14:59:08 -06001621
1622 flowi4_init_output(&fl4, oif,
Lorenzo Colittie1108612014-05-13 10:17:33 -07001623 IP4_REPLY_MARK(net, skb->mark),
Eric Dumazet66b13d92011-10-24 03:06:21 -04001624 RT_TOS(arg->tos),
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001625 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
David S. Miller77968b72011-05-08 17:12:19 -07001626 ip_reply_arg_flowi_flags(arg),
David S. Miller70e73412012-06-28 03:21:41 -07001627 daddr, saddr,
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09001628 tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1629 arg->uid);
David S. Miller77968b72011-05-08 17:12:19 -07001630 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001631 rt = ip_route_output_key(net, &fl4);
David S. Miller77968b72011-05-08 17:12:19 -07001632 if (IS_ERR(rt))
1633 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
Eric Dumazetbdbbb852015-01-29 21:35:05 -08001635 inet_sk(sk)->tos = arg->tos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 sk->sk_priority = skb->priority;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001638 sk->sk_protocol = ip_hdr(skb)->protocol;
Patrick McHardyf0e48db2007-06-04 21:32:46 -07001639 sk->sk_bound_dev_if = arg->bound_dev_if;
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001640 sk->sk_sndbuf = sysctl_wmem_default;
Pau Espin Pedrolbf99b4d2017-01-06 20:33:28 +01001641 sk->sk_mark = fl4.flowi4_mark;
Vasily Averin40620902014-10-15 16:24:02 +04001642 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1643 len, 0, &ipc, &rt, MSG_DONTWAIT);
1644 if (unlikely(err)) {
1645 ip_flush_pending_frames(sk);
1646 goto out;
1647 }
1648
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001649 nskb = skb_peek(&sk->sk_write_queue);
1650 if (nskb) {
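		/* Fold the checksum accumulated over the queued data into
		 * the transport header; arg->csumoffset counts in 16-bit
		 * words from the start of the transport header.
		 */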
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 if (arg->csumoffset >= 0)
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001652 *((__sum16 *)skb_transport_header(nskb) +
1653 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001654 arg->csum));
Eric Dumazetbe9f4a42012-07-19 07:34:03 +00001655 nskb->ip_summed = CHECKSUM_NONE;
David S. Miller77968b72011-05-08 17:12:19 -07001656 ip_push_pending_frames(sk, &fl4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 }
Vasily Averin40620902014-10-15 16:24:02 +04001658out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 ip_rt_put(rt);
1660}
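
/*
 * Hedged usage sketch (names illustrative; loosely modeled on TCP's
 * reset/ack path, which calls this on a dedicated per-cpu control
 * socket). Here @rep stands for a prebuilt reply header; arg.csum
 * seeds the pseudo-header checksum, ip_reply_glue_bits() accumulates
 * the payload sum, and csumoffset tells the code above where to fold
 * the final result:
 *
 *	struct ip_reply_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.iov[0].iov_base = &rep;
 *	arg.iov[0].iov_len  = sizeof(rep);
 *	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 *				      ip_hdr(skb)->saddr,
 *				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_unicast_reply(ctl_sk, skb, sopt, ip_hdr(skb)->saddr,
 *			      ip_hdr(skb)->daddr, &arg,
 *			      arg.iov[0].iov_len);
 */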
1661
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662void __init ip_init(void)
1663{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 ip_rt_init();
1665 inet_initpeers();
1666
WANG Cong72c1d3b2014-01-10 16:09:45 -08001667#if defined(CONFIG_IP_MULTICAST)
1668 igmp_mc_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669#endif
1670}