/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

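/* Per-port registry of UDP GRO handlers: a singly linked list that is
 * updated under udp_offload_lock and walked locklessly by RCU readers.
 */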
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv *udp_offload_base __read_mostly;

struct udp_offload_priv {
        struct udp_offload *offload;
        struct rcu_head rcu;
        struct udp_offload_priv __rcu *next;
};

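/* Seed uh->check with the pseudo-header checksum and point
 * csum_start/csum_offset at the UDP checksum field, so the device (or
 * the software fallback) can complete each segment's checksum later
 * (CHECKSUM_PARTIAL).
 */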
static int udp4_ufo_send_check(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                return -EINVAL;

        if (likely(!skb->encapsulation)) {
                const struct iphdr *iph;
                struct udphdr *uh;

                iph = ip_hdr(skb);
                uh = udp_hdr(skb);

                uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
                                               IPPROTO_UDP, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                skb->ip_summed = CHECKSUM_PARTIAL;
        }

        return 0;
}

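/* GSO entry point for UDP: tunneled skbs are handed to the UDP tunnel
 * segmentation helper; packets from untrusted sources (SKB_GSO_DODGY)
 * only get their gso_type validated and gso_segs recomputed; everything
 * else falls back to software UFO, which completes the UDP checksum
 * before the datagram is split into IP fragments.
 */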
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
        int offset;
        __wsum csum;

        if (skb->encapsulation &&
            skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
                segs = skb_udp_tunnel_segment(skb, features);
                goto out;
        }

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_IPIP |
                                      SKB_GSO_GRE | SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        /* Do software UFO. Complete and fill in the UDP checksum as
         * HW cannot do checksum of UDP packets sent as multiple
         * IP fragments.
         */
        offset = skb_checksum_start_offset(skb);
        csum = skb_checksum(skb, offset, skb->len - offset, 0);
        offset += skb->csum_offset;
        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
        skb->ip_summed = CHECKSUM_NONE;

        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment()
         */
        segs = skb_segment(skb, features);
out:
        return segs;
}

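/* udp_add_offload - register a GRO handler for a UDP destination port.
 *
 * Illustrative usage from a UDP-based tunnel driver (the port number and
 * callback names below are examples, not part of this file):
 *
 *	static struct udp_offload vxlan_offload = {
 *		.port      = htons(4789),
 *		.callbacks = {
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	err = udp_add_offload(&vxlan_offload);
 *
 * The new node's ->next is published before the list head is updated, so
 * concurrent RCU readers always see a consistent list.
 */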
int udp_add_offload(struct udp_offload *uo)
{
        struct udp_offload_priv **head = &udp_offload_base;
        struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);

        if (!new_offload)
                return -ENOMEM;

        new_offload->offload = uo;

        spin_lock(&udp_offload_lock);
        rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
        rcu_assign_pointer(*head, rcu_dereference(new_offload));
        spin_unlock(&udp_offload_lock);

        return 0;
}
EXPORT_SYMBOL(udp_add_offload);

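/* RCU callback: free a registry node once no reader can still see it. */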
static void udp_offload_free_routine(struct rcu_head *head)
{
        struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
        kfree(ou_priv);
}

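/* Unlink the matching node under the lock, then hand the kfree() to
 * call_rcu() so the node is not freed while RCU readers may still be
 * traversing the list.
 */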
void udp_del_offload(struct udp_offload *uo)
{
        struct udp_offload_priv __rcu **head = &udp_offload_base;
        struct udp_offload_priv *uo_priv;

        spin_lock(&udp_offload_lock);

        uo_priv = rcu_dereference(*head);
        for (; uo_priv != NULL;
             uo_priv = rcu_dereference(*head)) {
                if (uo_priv->offload == uo) {
                        rcu_assign_pointer(*head, rcu_dereference(uo_priv->next));
                        goto unlock;
                }
                head = &uo_priv->next;
        }
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
        spin_unlock(&udp_offload_lock);
        if (uo_priv != NULL)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);

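/* GRO receive for UDP: an skb is considered at most once (udp_mark) and,
 * for the outermost header, only if the NIC already verified its checksum
 * (CHECKSUM_COMPLETE). Packets are matched to a registered handler by
 * destination port; the flow comparison checks the source and destination
 * ports of held packets with a single 32-bit load, since the two __be16
 * fields are adjacent in the header. Matching skbs are passed to the
 * handler's gro_receive callback with the UDP header already pulled.
 */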
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct udp_offload_priv *uo_priv;
        struct sk_buff *p, **pp = NULL;
        struct udphdr *uh, *uh2;
        unsigned int hlen, off;
        int flush = 1;

        if (NAPI_GRO_CB(skb)->udp_mark ||
            (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
                goto out;

        /* mark that this skb passed once through the udp gro layer */
        NAPI_GRO_CB(skb)->udp_mark = 1;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*uh);
        uh = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                uh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!uh))
                        goto out;
        }

        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_receive)
                        goto unflush;
        }
        goto out_unlock;

unflush:
        flush = 0;

        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                uh2 = (struct udphdr *)(p->data + off);
                if (*(u32 *)&uh->source != *(u32 *)&uh2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        return pp;
}

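/* GRO complete for UDP: rewrite uh->len to cover the merged super-packet,
 * then let the registered port's gro_complete callback fix up the inner
 * headers; -ENOSYS is returned if no handler is registered for the port.
 */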
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct udp_offload_priv *uo_priv;
        __be16 newlen = htons(skb->len - nhoff);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
        int err = -ENOSYS;

        uh->len = newlen;

        rcu_read_lock();

        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_complete)
                        break;
        }

        if (uo_priv != NULL)
                err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

        rcu_read_unlock();
        return err;
}

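/* Wire the handlers above into the inet offload table for IPPROTO_UDP. */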
static const struct net_offload udpv4_offload = {
        .callbacks = {
                .gso_send_check = udp4_ufo_send_check,
                .gso_segment = udp4_ufo_fragment,
                .gro_receive = udp_gro_receive,
                .gro_complete = udp_gro_complete,
        },
};

int __init udpv4_offload_init(void)
{
        return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}