/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

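/* Registry of per-port UDP GRO handlers (e.g. for VXLAN).  Entries form a
 * singly linked, RCU-protected list: writers serialize on udp_offload_lock,
 * readers walk the chain under rcu_read_lock().
 */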
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) \
	rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};

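/* Prepare a UDP skb for segmentation: store the pseudo-header checksum in
 * uh->check and point csum_start/csum_offset at the UDP checksum field so
 * the full checksum can be completed later (CHECKSUM_PARTIAL).
 */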
static int udp4_ufo_send_check(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		const struct iphdr *iph;
		struct udphdr *uh;

		iph = ip_hdr(skb);
		uh = udp_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
					       IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}

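/* Segment a UDP GSO skb.  Tunnel GSO types are handed to
 * skb_udp_tunnel_segment(); everything else goes through software UFO,
 * which completes the UDP checksum over the whole super-packet and then
 * splits it with skb_segment().
 */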
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	int offset;
	__wsum csum;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	offset = skb_checksum_start_offset(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

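/* Register a per-port GRO handler.  A tunnel driver (e.g. vxlan) fills a
 * struct udp_offload with its UDP port and gro_receive/gro_complete
 * callbacks and calls this once its tunnel socket exists.  New entries are
 * pushed onto the head of the RCU list; GFP_ATOMIC is used because callers
 * may not be able to sleep.
 */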
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);

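/* RCU callback: free an unlinked entry once all readers are done with it. */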
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}

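/* Unregister a handler: unlink it under the writer lock, then defer the
 * actual kfree() to an RCU grace period so concurrent udp_gro_receive()
 * walkers never see freed memory.
 */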
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

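/* GRO receive for UDP: find a registered handler for the packet's
 * destination port, kick packets of other UDP flows out of the GRO list,
 * then hand everything past the UDP header to the handler, which does the
 * actual aggregation.  Packets with no handler, or seen here twice, are
 * flushed.
 */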
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh, *uh2;
	unsigned int hlen, off;
	int flush = 1;

	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
		goto out;

	/* Mark that this skb passed once through the udp gro layer. */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		uh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!uh))
			goto out;
	}

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

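	/* Compare source and destination ports in one 32-bit load: held
	 * packets whose port pair differs cannot belong to this flow.
	 */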
	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);
		if (*(u32 *)&uh->source != *(u32 *)&uh2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}

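/* GRO complete for UDP: fix up the UDP length of the merged super-packet
 * and let the per-port handler finish its own headers.  Returns -ENOSYS if
 * no handler is registered for the destination port.
 */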
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL)
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

	rcu_read_unlock();
	return err;
}

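/* GSO/GRO callbacks registered for IPPROTO_UDP at boot. */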
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp_gro_receive,
		.gro_complete = udp_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}