Herbert Xu | 406ef77 | 2007-10-08 17:16:30 -0700 | [diff] [blame] | 1 | /* |
| 2 | * xfrm_output.c - Common IPsec encapsulation code. |
| 3 | * |
| 4 | * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. |
| 10 | */ |
| 11 | |
| 12 | #include <linux/errno.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/netdevice.h> |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 15 | #include <linux/netfilter.h> |
Herbert Xu | 406ef77 | 2007-10-08 17:16:30 -0700 | [diff] [blame] | 16 | #include <linux/skbuff.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 17 | #include <linux/slab.h> |
Herbert Xu | 406ef77 | 2007-10-08 17:16:30 -0700 | [diff] [blame] | 18 | #include <linux/spinlock.h> |
Herbert Xu | 406ef77 | 2007-10-08 17:16:30 -0700 | [diff] [blame] | 19 | #include <net/dst.h> |
| 20 | #include <net/xfrm.h> |
| 21 | |
Eric W. Biederman | 0c4b51f | 2015-09-15 20:04:18 -0500 | [diff] [blame] | 22 | static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb); |
Florian Westphal | 0c620e9 | 2019-03-29 21:16:25 +0100 | [diff] [blame^] | 23 | static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
Herbert Xu | c6581a4 | 2007-11-13 21:43:43 -0800 | [diff] [blame] | 24 | |
Steffen Klassert | 26b2072 | 2012-03-21 23:32:39 +0000 | [diff] [blame] | 25 | static int xfrm_skb_check_space(struct sk_buff *skb) |
Herbert Xu | 83815de | 2007-10-08 17:25:08 -0700 | [diff] [blame] | 26 | { |
Eric Dumazet | adf3090 | 2009-06-02 05:19:30 +0000 | [diff] [blame] | 27 | struct dst_entry *dst = skb_dst(skb); |
Herbert Xu | 550ade8 | 2007-11-13 21:33:01 -0800 | [diff] [blame] | 28 | int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) |
Herbert Xu | 83815de | 2007-10-08 17:25:08 -0700 | [diff] [blame] | 29 | - skb_headroom(skb); |
Johannes Berg | f5184d2 | 2008-05-12 20:48:31 -0700 | [diff] [blame] | 30 | int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); |
Herbert Xu | 83815de | 2007-10-08 17:25:08 -0700 | [diff] [blame] | 31 | |
Herbert Xu | d01dbeb | 2008-09-30 02:03:19 -0700 | [diff] [blame] | 32 | if (nhead <= 0) { |
| 33 | if (ntail <= 0) |
| 34 | return 0; |
| 35 | nhead = 0; |
| 36 | } else if (ntail < 0) |
| 37 | ntail = 0; |
Herbert Xu | 83815de | 2007-10-08 17:25:08 -0700 | [diff] [blame] | 38 | |
Herbert Xu | d01dbeb | 2008-09-30 02:03:19 -0700 | [diff] [blame] | 39 | return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); |
Herbert Xu | 83815de | 2007-10-08 17:25:08 -0700 | [diff] [blame] | 40 | } |
| 41 | |
/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 *
 * Pop the top dst off @skb, returning a reference to its child so the
 * caller can continue with the next hop in the stacked route.
 */
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child;

	/* Take a reference on the child before dropping the parent. */
	child = dst_clone(xfrm_dst_child(skb_dst(skb)));
	skb_dst_drop(skb);

	return child;
}
| 53 | |
/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 *
 * Returns 0 on success, -EOPNOTSUPP when IPv4 transport mode is not
 * compiled in.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_INET_XFRM_MODE_TRANSPORT)
	/* Capture the current header position before it is relocated. */
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	/* Reserve props.header_len bytes ahead of the network header for
	 * the IPsec header; mac_header is pointed at the IPv4 protocol
	 * field so the transform can patch the next-header value later.
	 */
	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	__skb_pull(skb, ihl);
	/* Relocate the IP header into the newly reserved space (regions
	 * may overlap, hence memmove).
	 */
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
#endif
}
| 79 | |
/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 *
 * Returns 0 on success, a negative error from the type's hdr_offset
 * callback, or -EOPNOTSUPP when IPv6 transport mode is not compiled in.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_INET6_XFRM_MODE_TRANSPORT)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	/* Remember the current header position before relocation. */
	iph = ipv6_hdr(skb);
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	/* hdr_offset computes where the IPsec header is to be inserted
	 * and points prevhdr at the preceding next-header field.
	 */
	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	/* mac_header marks the next-header byte to patch later, shifted
	 * by the space reserved for the IPsec header.
	 */
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	/* Move the IPv6 header (plus covered extension headers) forward. */
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
#endif
}
| 110 | |
/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 *
 * Returns 0 on success, a negative error from the type's hdr_offset
 * callback, or -EOPNOTSUPP when route optimization is not compiled in.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	/* hdr_offset computes the insertion point for the RO header and
	 * points prevhdr at the preceding next-header field.
	 */
	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	/* Shift the IPv6 header (plus covered extension headers) forward. */
	memmove(ipv6_hdr(skb), iph, hdr_len);

	/* Record when this state was last used for lifetime accounting. */
	x->lastused = ktime_get_real_seconds();

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
#endif
}
| 143 | |
| 144 | static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) |
| 145 | { |
| 146 | int err; |
| 147 | |
| 148 | err = xfrm_inner_extract_output(x, skb); |
| 149 | if (err) |
| 150 | return err; |
| 151 | |
| 152 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; |
| 153 | skb->protocol = htons(ETH_P_IP); |
| 154 | |
| 155 | return x->outer_mode->output2(x, skb); |
| 156 | } |
| 157 | |
| 158 | static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) |
| 159 | { |
| 160 | #if IS_ENABLED(CONFIG_IPV6) |
| 161 | int err; |
| 162 | |
| 163 | err = xfrm_inner_extract_output(x, skb); |
| 164 | if (err) |
| 165 | return err; |
| 166 | |
| 167 | skb->ignore_df = 1; |
| 168 | skb->protocol = htons(ETH_P_IPV6); |
| 169 | |
| 170 | return x->outer_mode->output2(x, skb); |
| 171 | #else |
| 172 | WARN_ON_ONCE(1); |
| 173 | return -EOPNOTSUPP; |
| 174 | #endif |
| 175 | } |
| 176 | |
| 177 | static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb) |
| 178 | { |
| 179 | switch (x->outer_mode->encap) { |
| 180 | case XFRM_MODE_BEET: |
| 181 | case XFRM_MODE_TUNNEL: |
| 182 | if (x->outer_mode->family == AF_INET) |
| 183 | return xfrm4_prepare_output(x, skb); |
| 184 | if (x->outer_mode->family == AF_INET6) |
| 185 | return xfrm6_prepare_output(x, skb); |
| 186 | break; |
| 187 | case XFRM_MODE_TRANSPORT: |
| 188 | if (x->outer_mode->family == AF_INET) |
| 189 | return xfrm4_transport_output(x, skb); |
| 190 | if (x->outer_mode->family == AF_INET6) |
| 191 | return xfrm6_transport_output(x, skb); |
| 192 | break; |
| 193 | case XFRM_MODE_ROUTEOPTIMIZATION: |
| 194 | if (x->outer_mode->family == AF_INET6) |
| 195 | return xfrm6_ro_output(x, skb); |
| 196 | WARN_ON_ONCE(1); |
| 197 | break; |
| 198 | default: |
| 199 | WARN_ON_ONCE(1); |
| 200 | break; |
| 201 | } |
| 202 | |
| 203 | return -EOPNOTSUPP; |
| 204 | } |
| 205 | |
#if IS_ENABLED(CONFIG_NET_PKTGEN)
/* Exported wrapper so pktgen can apply outer-mode encapsulation while
 * xfrm_outer_mode_output() itself stays static to this file.
 */
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif
| 213 | |
/* Apply one or more stacked xfrm states to @skb: make room for the
 * transform headers, run the outer mode, account replay/lifetime under
 * x->lock, and invoke the transform's output routine.  The loop pops
 * the dst stack and repeats for transport-mode states until a
 * tunnel-mode state (or the end of the stack) is reached.
 *
 * @err > 0 means "start processing from the top"; @err <= 0 resumes at
 * the 'resume' label after an asynchronous transform completed with
 * that status (see xfrm_output_resume()).
 *
 * Returns 0 when the packet is ready for the next layer, -EINPROGRESS
 * when the transform continues asynchronously, or a negative error
 * (on error paths the skb has been freed).
 */
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		/* State checks, replay counter and lifetime accounting
		 * must be consistent, so they run under the state lock.
		 */
		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		/* Advance the replay sequence number; fails on overflow. */
		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		/* The transform may run asynchronously; make sure the dst
		 * reference cannot go away underneath it.
		 */
		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			/* Hardware offload: only stamp the encap header. */
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		/* Move on to the next dst/state on the stack. */
		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}
| 306 | |
/* Continue output processing of @skb, either freshly (err == 1 from
 * xfrm_output2()) or after an asynchronous transform completed with
 * status @err.  Each successful xfrm_output_one() pass re-runs the
 * address family's local-out hook; once no xfrm dst remains the packet
 * is handed to dst_output(), otherwise it traverses POST_ROUTING and
 * re-enters via xfrm_output2().
 *
 * Returns 0 on success or when processing continues asynchronously,
 * a negative error otherwise.
 */
int xfrm_output_resume(struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		/* Old conntrack state refers to the pre-transform packet. */
		nf_reset(skb);

		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
		if (unlikely(err != 1))
			goto out;

		/* No further xfrm state: transmit the finished packet. */
		if (!skb_dst(skb)->xfrm)
			return dst_output(net, skb->sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, skb->sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	/* Async continuation is not an error from the caller's view. */
	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 335 | |
/* Netfilter okfn: (re)start transform processing from the top.  The
 * net and sk arguments are implied by the skb and thus unused here.
 */
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(skb, 1);
}
| 340 | |
Eric W. Biederman | 0c4b51f | 2015-09-15 20:04:18 -0500 | [diff] [blame] | 341 | static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb) |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 342 | { |
| 343 | struct sk_buff *segs; |
| 344 | |
Konstantin Khlebnikov | 9207f9d | 2016-01-08 15:21:46 +0300 | [diff] [blame] | 345 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); |
| 346 | BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET); |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 347 | segs = skb_gso_segment(skb, 0); |
| 348 | kfree_skb(skb); |
Hirofumi Nakagawa | 801678c | 2008-04-29 01:03:09 -0700 | [diff] [blame] | 349 | if (IS_ERR(segs)) |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 350 | return PTR_ERR(segs); |
Florian Westphal | 330966e | 2014-10-20 13:49:17 +0200 | [diff] [blame] | 351 | if (segs == NULL) |
| 352 | return -EINVAL; |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 353 | |
| 354 | do { |
| 355 | struct sk_buff *nskb = segs->next; |
| 356 | int err; |
| 357 | |
David S. Miller | a8305bf | 2018-07-29 20:42:53 -0700 | [diff] [blame] | 358 | skb_mark_not_on_list(segs); |
Eric W. Biederman | 0c4b51f | 2015-09-15 20:04:18 -0500 | [diff] [blame] | 359 | err = xfrm_output2(net, sk, segs); |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 360 | |
| 361 | if (unlikely(err)) { |
Florian Westphal | 46cfd72 | 2014-09-10 01:08:46 +0200 | [diff] [blame] | 362 | kfree_skb_list(nskb); |
Herbert Xu | 862b82c | 2007-11-13 21:43:11 -0800 | [diff] [blame] | 363 | return err; |
| 364 | } |
| 365 | |
| 366 | segs = nskb; |
| 367 | } while (segs); |
| 368 | |
| 369 | return 0; |
Herbert Xu | 406ef77 | 2007-10-08 17:16:30 -0700 | [diff] [blame] | 370 | } |
Herbert Xu | c6581a4 | 2007-11-13 21:43:43 -0800 | [diff] [blame] | 371 | |
/* Entry point for IPsec output: @skb's dst carries the xfrm state
 * stack to apply.  Tries hardware offload first; otherwise resolves
 * GSO segmentation and pending checksums in software before starting
 * transform processing via xfrm_output2().
 *
 * Consumes the skb on error.  Returns 0 or a negative error / the
 * lower layer's result code.
 */
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		skb->encapsulation = 1;

		/* Record the state in the secpath so the offload/resume
		 * path can find it; olen marks the offloaded entry.
		 */
		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			/* Offloaded GSO: the device segments post-ESP. */
			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;

			return xfrm_output2(net, sk, skb);
		}

		/* Device computes the ESP checksum; skip software help. */
		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	}

	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The device cannot checksum after encryption; finish the
		 * pending checksum in software now.
		 */
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
Kazunori MIYAZAWA | df9dcb4 | 2008-03-24 14:51:51 -0700 | [diff] [blame] | 421 | |
Florian Westphal | 0c620e9 | 2019-03-29 21:16:25 +0100 | [diff] [blame^] | 422 | static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) |
Kazunori MIYAZAWA | df9dcb4 | 2008-03-24 14:51:51 -0700 | [diff] [blame] | 423 | { |
| 424 | struct xfrm_mode *inner_mode; |
| 425 | if (x->sel.family == AF_UNSPEC) |
| 426 | inner_mode = xfrm_ip2inner_mode(x, |
Eric Dumazet | adf3090 | 2009-06-02 05:19:30 +0000 | [diff] [blame] | 427 | xfrm_af2proto(skb_dst(skb)->ops->family)); |
Kazunori MIYAZAWA | df9dcb4 | 2008-03-24 14:51:51 -0700 | [diff] [blame] | 428 | else |
| 429 | inner_mode = x->inner_mode; |
| 430 | |
| 431 | if (inner_mode == NULL) |
| 432 | return -EAFNOSUPPORT; |
| 433 | return inner_mode->afinfo->extract_output(x, skb); |
| 434 | } |
| 435 | |
Hannes Frederic Sowa | 628e341 | 2013-08-14 13:05:23 +0200 | [diff] [blame] | 436 | void xfrm_local_error(struct sk_buff *skb, int mtu) |
| 437 | { |
Hannes Frederic Sowa | 844d487 | 2013-08-18 13:47:01 +0200 | [diff] [blame] | 438 | unsigned int proto; |
Hannes Frederic Sowa | 628e341 | 2013-08-14 13:05:23 +0200 | [diff] [blame] | 439 | struct xfrm_state_afinfo *afinfo; |
| 440 | |
Hannes Frederic Sowa | 844d487 | 2013-08-18 13:47:01 +0200 | [diff] [blame] | 441 | if (skb->protocol == htons(ETH_P_IP)) |
| 442 | proto = AF_INET; |
| 443 | else if (skb->protocol == htons(ETH_P_IPV6)) |
| 444 | proto = AF_INET6; |
| 445 | else |
| 446 | return; |
| 447 | |
| 448 | afinfo = xfrm_state_get_afinfo(proto); |
Taehee Yoo | 46c0ef6 | 2018-03-16 11:35:51 +0900 | [diff] [blame] | 449 | if (afinfo) { |
Florian Westphal | af5d27c | 2017-01-09 14:20:47 +0100 | [diff] [blame] | 450 | afinfo->local_error(skb, mtu); |
Taehee Yoo | 46c0ef6 | 2018-03-16 11:35:51 +0900 | [diff] [blame] | 451 | rcu_read_unlock(); |
| 452 | } |
Hannes Frederic Sowa | 628e341 | 2013-08-14 13:05:23 +0200 | [diff] [blame] | 453 | } |
Hannes Frederic Sowa | 628e341 | 2013-08-14 13:05:23 +0200 | [diff] [blame] | 454 | EXPORT_SYMBOL_GPL(xfrm_local_error); |