Martin Varghese | 571912c | 2020-02-24 10:57:50 +0530 | [diff] [blame^] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* Bareudp: UDP tunnel encasulation for different Payload types like |
| 3 | * MPLS, NSH, IP, etc. |
| 4 | * Copyright (c) 2019 Nokia, Inc. |
| 5 | * Authors: Martin Varghese, <martin.varghese@nokia.com> |
| 6 | */ |
| 7 | |
| 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 9 | |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/module.h> |
| 12 | #include <linux/etherdevice.h> |
| 13 | #include <linux/hash.h> |
| 14 | #include <net/dst_metadata.h> |
| 15 | #include <net/gro_cells.h> |
| 16 | #include <net/rtnetlink.h> |
| 17 | #include <net/protocol.h> |
| 18 | #include <net/ip6_tunnel.h> |
| 19 | #include <net/ip_tunnels.h> |
| 20 | #include <net/udp_tunnel.h> |
| 21 | #include <net/bareudp.h> |
| 22 | |
/* Encapsulation overhead: UDP header only (the payload is carried bare),
 * plus the outer IP header for PMTU / headroom computations.
 */
#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
			   sizeof(struct udphdr))

/* Writable module parameter: rate-limited logging of non-ECT inner
 * packets seen during ECN decapsulation (see bareudp_udp_encap_recv).
 */
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-network namespace private data for this module */

/* Slot id for net_generic(); assigned by register_pernet_subsys() */
static unsigned int bareudp_net_id;
| 36 | |
struct bareudp_net {
	struct list_head bareudp_list;	/* all bareudp devices in this netns */
};

/* Pseudo network device */
struct bareudp_dev {
	struct net *net;	/* netns for packet i/o */
	struct net_device *dev;	/* netdev for bareudp tunnel */
	__be16 ethertype;	/* payload ethertype set on decapsulated skbs */
	__be16 port;		/* UDP port the tunnel listens/sends on */
	u16 sport_min;		/* lower bound for the hashed UDP source port */
	struct socket __rcu *sock;	/* listen socket; non-NULL while up */
	struct list_head next;	/* bareudp node on namespace list */
	struct gro_cells gro_cells;	/* per-cpu GRO aggregation on rx */
};
| 52 | |
/* UDP encapsulation receive handler, installed on the tunnel socket by
 * bareudp_socket_create().  Strips the UDP header, stamps the skb with
 * the device's configured ethertype, attaches rx tunnel metadata and
 * hands the packet to GRO.  Always returns 0: the skb is either passed
 * up the stack or freed here.
 */
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;

	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	/* Outer address family, used for ECN decapsulation and for the
	 * rx tunnel metadata below.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	proto = bareudp->ethertype;

	/* Pull the UDP header, set skb->protocol to the configured
	 * ethertype, and scrub the skb when it crosses netns.
	 */
	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
					 dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}

	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	/* Keep a pointer to the outer IP header before re-pointing the
	 * network header at the inner packet.
	 */
	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	/* Propagate ECN from the outer header into the inner packet;
	 * err > 1 means the packet must be dropped.
	 */
	if (family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
#endif
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			goto drop;
		}
	}

	/* Snapshot the length: gro_cells_receive() may consume the skb */
	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS)) {
		stats = this_cpu_ptr(bareudp->dev->tstats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}
| 136 | |
/* encap_err_lookup stub for the tunnel socket: ICMP errors are not
 * matched to a tunnel here, so this always returns 0.
 */
static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
| 141 | |
| 142 | static int bareudp_init(struct net_device *dev) |
| 143 | { |
| 144 | struct bareudp_dev *bareudp = netdev_priv(dev); |
| 145 | int err; |
| 146 | |
| 147 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
| 148 | if (!dev->tstats) |
| 149 | return -ENOMEM; |
| 150 | |
| 151 | err = gro_cells_init(&bareudp->gro_cells, dev); |
| 152 | if (err) { |
| 153 | free_percpu(dev->tstats); |
| 154 | return err; |
| 155 | } |
| 156 | return 0; |
| 157 | } |
| 158 | |
/* ndo_uninit: release everything bareudp_init() allocated */
static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
	free_percpu(dev->tstats);
}
| 166 | |
| 167 | static struct socket *bareudp_create_sock(struct net *net, __be16 port) |
| 168 | { |
| 169 | struct udp_port_cfg udp_conf; |
| 170 | struct socket *sock; |
| 171 | int err; |
| 172 | |
| 173 | memset(&udp_conf, 0, sizeof(udp_conf)); |
| 174 | #if IS_ENABLED(CONFIG_IPV6) |
| 175 | udp_conf.family = AF_INET6; |
| 176 | #else |
| 177 | udp_conf.family = AF_INET; |
| 178 | #endif |
| 179 | udp_conf.local_udp_port = port; |
| 180 | /* Open UDP socket */ |
| 181 | err = udp_sock_create(net, &udp_conf, &sock); |
| 182 | if (err < 0) |
| 183 | return ERR_PTR(err); |
| 184 | |
| 185 | return sock; |
| 186 | } |
| 187 | |
/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;	/* read back in encap_rcv */
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	/* NOTE(review): done only for AF_INET6 sockets here — presumably
	 * setup_udp_tunnel_sock() already covers the IPv4 case; confirm.
	 */
	if (sock->sk->sk_family == AF_INET6)
		udp_encap_enable();

	/* Publish the socket; readers use rcu_dereference() */
	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}
| 213 | |
| 214 | static int bareudp_open(struct net_device *dev) |
| 215 | { |
| 216 | struct bareudp_dev *bareudp = netdev_priv(dev); |
| 217 | int ret = 0; |
| 218 | |
| 219 | ret = bareudp_socket_create(bareudp, bareudp->port); |
| 220 | return ret; |
| 221 | } |
| 222 | |
| 223 | static void bareudp_sock_release(struct bareudp_dev *bareudp) |
| 224 | { |
| 225 | struct socket *sock; |
| 226 | |
| 227 | sock = bareudp->sock; |
| 228 | rcu_assign_pointer(bareudp->sock, NULL); |
| 229 | synchronize_net(); |
| 230 | udp_tunnel_sock_release(sock); |
| 231 | } |
| 232 | |
/* ndo_stop: tear down the tunnel's UDP receive socket */
static int bareudp_stop(struct net_device *dev)
{
	bareudp_sock_release(netdev_priv(dev));
	return 0;
}
| 240 | |
| 241 | static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
| 242 | struct bareudp_dev *bareudp, |
| 243 | const struct ip_tunnel_info *info) |
| 244 | { |
| 245 | bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); |
| 246 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); |
| 247 | struct socket *sock = rcu_dereference(bareudp->sock); |
| 248 | bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); |
| 249 | const struct ip_tunnel_key *key = &info->key; |
| 250 | struct rtable *rt; |
| 251 | __be16 sport, df; |
| 252 | int min_headroom; |
| 253 | __u8 tos, ttl; |
| 254 | __be32 saddr; |
| 255 | int err; |
| 256 | |
| 257 | if (!sock) |
| 258 | return -ESHUTDOWN; |
| 259 | |
| 260 | rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info, |
| 261 | IPPROTO_UDP, use_cache); |
| 262 | |
| 263 | if (IS_ERR(rt)) |
| 264 | return PTR_ERR(rt); |
| 265 | |
| 266 | skb_tunnel_check_pmtu(skb, &rt->dst, |
| 267 | BAREUDP_IPV4_HLEN + info->options_len); |
| 268 | |
| 269 | sport = udp_flow_src_port(bareudp->net, skb, |
| 270 | bareudp->sport_min, USHRT_MAX, |
| 271 | true); |
| 272 | tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); |
| 273 | ttl = key->ttl; |
| 274 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; |
| 275 | skb_scrub_packet(skb, xnet); |
| 276 | |
| 277 | if (!skb_pull(skb, skb_network_offset(skb))) |
| 278 | goto free_dst; |
| 279 | |
| 280 | min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + |
| 281 | BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr); |
| 282 | |
| 283 | err = skb_cow_head(skb, min_headroom); |
| 284 | if (unlikely(err)) |
| 285 | goto free_dst; |
| 286 | |
| 287 | err = udp_tunnel_handle_offloads(skb, udp_sum); |
| 288 | if (err) |
| 289 | goto free_dst; |
| 290 | |
| 291 | skb_set_inner_protocol(skb, bareudp->ethertype); |
| 292 | udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst, |
| 293 | tos, ttl, df, sport, bareudp->port, |
| 294 | !net_eq(bareudp->net, dev_net(bareudp->dev)), |
| 295 | !(info->key.tun_flags & TUNNEL_CSUM)); |
| 296 | return 0; |
| 297 | |
| 298 | free_dst: |
| 299 | dst_release(&rt->dst); |
| 300 | return err; |
| 301 | } |
| 302 | |
| 303 | #if IS_ENABLED(CONFIG_IPV6) |
| 304 | static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
| 305 | struct bareudp_dev *bareudp, |
| 306 | const struct ip_tunnel_info *info) |
| 307 | { |
| 308 | bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); |
| 309 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); |
| 310 | struct socket *sock = rcu_dereference(bareudp->sock); |
| 311 | bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); |
| 312 | const struct ip_tunnel_key *key = &info->key; |
| 313 | struct dst_entry *dst = NULL; |
| 314 | struct in6_addr saddr, daddr; |
| 315 | int min_headroom; |
| 316 | __u8 prio, ttl; |
| 317 | __be16 sport; |
| 318 | int err; |
| 319 | |
| 320 | if (!sock) |
| 321 | return -ESHUTDOWN; |
| 322 | |
| 323 | dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info, |
| 324 | IPPROTO_UDP, use_cache); |
| 325 | if (IS_ERR(dst)) |
| 326 | return PTR_ERR(dst); |
| 327 | |
| 328 | skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len); |
| 329 | |
| 330 | sport = udp_flow_src_port(bareudp->net, skb, |
| 331 | bareudp->sport_min, USHRT_MAX, |
| 332 | true); |
| 333 | prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); |
| 334 | ttl = key->ttl; |
| 335 | |
| 336 | skb_scrub_packet(skb, xnet); |
| 337 | |
| 338 | if (!skb_pull(skb, skb_network_offset(skb))) |
| 339 | goto free_dst; |
| 340 | |
| 341 | min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + |
| 342 | BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr); |
| 343 | |
| 344 | err = skb_cow_head(skb, min_headroom); |
| 345 | if (unlikely(err)) |
| 346 | goto free_dst; |
| 347 | |
| 348 | err = udp_tunnel_handle_offloads(skb, udp_sum); |
| 349 | if (err) |
| 350 | goto free_dst; |
| 351 | |
| 352 | daddr = info->key.u.ipv6.dst; |
| 353 | udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev, |
| 354 | &saddr, &daddr, prio, ttl, |
| 355 | info->key.label, sport, bareudp->port, |
| 356 | !(info->key.tun_flags & TUNNEL_CSUM)); |
| 357 | return 0; |
| 358 | |
| 359 | free_dst: |
| 360 | dst_release(dst); |
| 361 | return err; |
| 362 | } |
| 363 | #endif |
| 364 | |
/* ndo_start_xmit: send @skb through the tunnel using the attached
 * collect_md tunnel metadata.  Packets without TX tunnel info or with
 * an ethertype other than the configured one are dropped and counted
 * as tx errors.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	/* Only the configured payload ethertype may be tunnelled */
	if (skb->protocol != bareudp->ethertype) {
		err = -EINVAL;
		goto tx_error;
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	/* RCU read side protects bareudp->sock against release */
	rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
	if (info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
#endif
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	/* Map selected errnos onto the matching stats counters */
	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
| 405 | |
/* ndo_fill_metadata_dst: resolve the egress route for @skb's tunnel
 * metadata and fill in the source address and UDP ports that
 * bareudp(6)_xmit_skb() would use, without transmitting anything.
 */
static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
					    info, IPPROTO_UDP, use_cache);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		/* Only the source address is needed; drop the route */
		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		/* NOTE(review): rcu_dereference() relies on the caller
		 * holding rcu_read_lock() — confirm this holds for all
		 * ndo_fill_metadata_dst call sites.
		 */
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
					    &saddr, info, IPPROTO_UDP,
					    use_cache);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
#endif
	} else {
		return -EINVAL;
	}

	/* Same port selection as the transmit paths */
	info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
					     bareudp->sport_min,
					     USHRT_MAX, true);
	info->key.tp_dst = bareudp->port;
	return 0;
}
| 451 | |
/* Netdevice callbacks for the bareudp pseudo device */
static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init = bareudp_init,
	.ndo_uninit = bareudp_uninit,
	.ndo_open = bareudp_open,
	.ndo_stop = bareudp_stop,
	.ndo_start_xmit = bareudp_xmit,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};

/* Netlink policy for the IFLA_BAREUDP_* attributes parsed by
 * bareudp2info().
 */
static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT] = { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 },
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type bareudp_type = {
	.name = "bareudp",
};
| 472 | |
/* Initialize the device structure. */
static void bareudp_setup(struct net_device *dev)
{
	dev->netdev_ops = &bareudp_netdev_ops;
	/* Device memory is released by free_netdev() after unregister */
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
	/* Scatter-gather, checksum offload and software GSO */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	/* Raw L3 tunnel: no link-layer header and no hardware address */
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV4_MIN_MTU;
	/* Leave room for the UDP encapsulation overhead */
	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
	dev->type = ARPHRD_NONE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
}
| 494 | |
| 495 | static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[], |
| 496 | struct netlink_ext_ack *extack) |
| 497 | { |
| 498 | if (!data) { |
| 499 | NL_SET_ERR_MSG(extack, |
| 500 | "Not enough attributes provided to perform the operation"); |
| 501 | return -EINVAL; |
| 502 | } |
| 503 | return 0; |
| 504 | } |
| 505 | |
| 506 | static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf) |
| 507 | { |
| 508 | if (!data[IFLA_BAREUDP_PORT] || !data[IFLA_BAREUDP_ETHERTYPE]) |
| 509 | return -EINVAL; |
| 510 | |
| 511 | if (data[IFLA_BAREUDP_PORT]) |
| 512 | conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]); |
| 513 | |
| 514 | if (data[IFLA_BAREUDP_ETHERTYPE]) |
| 515 | conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]); |
| 516 | |
| 517 | if (data[IFLA_BAREUDP_SRCPORT_MIN]) |
| 518 | conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]); |
| 519 | |
| 520 | return 0; |
| 521 | } |
| 522 | |
| 523 | static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn, |
| 524 | const struct bareudp_conf *conf) |
| 525 | { |
| 526 | struct bareudp_dev *bareudp, *t = NULL; |
| 527 | |
| 528 | list_for_each_entry(bareudp, &bn->bareudp_list, next) { |
| 529 | if (conf->port == bareudp->port) |
| 530 | t = bareudp; |
| 531 | } |
| 532 | return t; |
| 533 | } |
| 534 | |
/* Shared device configuration for rtnl newlink and the in-kernel
 * bareudp_dev_create() API: reject duplicate UDP ports in this netns,
 * copy the config into the device, register the netdevice and link it
 * on the per-netns list.
 * NOTE(review): walks bn->bareudp_list and calls register_netdevice(),
 * so this presumably runs under rtnl — confirm against all callers.
 */
static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	/* Only one bareudp device per UDP port per netns */
	t = bareudp_find_dev(bn, conf);
	if (t)
		return -EBUSY;

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}
| 558 | |
| 559 | static int bareudp_link_config(struct net_device *dev, |
| 560 | struct nlattr *tb[]) |
| 561 | { |
| 562 | int err; |
| 563 | |
| 564 | if (tb[IFLA_MTU]) { |
| 565 | err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); |
| 566 | if (err) |
| 567 | return err; |
| 568 | } |
| 569 | return 0; |
| 570 | } |
| 571 | |
| 572 | static int bareudp_newlink(struct net *net, struct net_device *dev, |
| 573 | struct nlattr *tb[], struct nlattr *data[], |
| 574 | struct netlink_ext_ack *extack) |
| 575 | { |
| 576 | struct bareudp_conf conf; |
| 577 | int err; |
| 578 | |
| 579 | err = bareudp2info(data, &conf); |
| 580 | if (err) |
| 581 | return err; |
| 582 | |
| 583 | err = bareudp_configure(net, dev, &conf); |
| 584 | if (err) |
| 585 | return err; |
| 586 | |
| 587 | err = bareudp_link_config(dev, tb); |
| 588 | if (err) |
| 589 | return err; |
| 590 | |
| 591 | return 0; |
| 592 | } |
| 593 | |
/* rtnl dellink: take the device off the per-netns list and queue it on
 * @head for unregistration (a NULL @head unregisters immediately).
 */
static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}
| 601 | |
/* Worst-case netlink attribute space needed by bareudp_fill_info() */
static size_t bareudp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16)) +  /* IFLA_BAREUDP_SRCPORT_MIN */
		0;
}
| 609 | |
| 610 | static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) |
| 611 | { |
| 612 | struct bareudp_dev *bareudp = netdev_priv(dev); |
| 613 | |
| 614 | if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port)) |
| 615 | goto nla_put_failure; |
| 616 | if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype)) |
| 617 | goto nla_put_failure; |
| 618 | if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min)) |
| 619 | goto nla_put_failure; |
| 620 | |
| 621 | return 0; |
| 622 | |
| 623 | nla_put_failure: |
| 624 | return -EMSGSIZE; |
| 625 | } |
| 626 | |
/* rtnl_link_ops backing "ip link add ... type bareudp" */
static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind = "bareudp",
	.maxtype = IFLA_BAREUDP_MAX,
	.policy = bareudp_policy,
	.priv_size = sizeof(struct bareudp_dev),
	.setup = bareudp_setup,
	.validate = bareudp_validate,
	.newlink = bareudp_newlink,
	.dellink = bareudp_dellink,
	.get_size = bareudp_get_size,
	.fill_info = bareudp_fill_info,
};
| 639 | |
/* In-kernel API for other modules to create a bareudp device: build the
 * link, configure it, raise the MTU to the maximum and bring the link
 * to its configured state.  Returns the netdev or an ERR_PTR().
 * NOTE(review): uses rtnl_create_link()/rtnl_configure_link(), so the
 * caller presumably holds rtnl — confirm.
 */
struct net_device *bareudp_dev_create(struct net *net, const char *name,
				      u8 name_assign_type,
				      struct bareudp_conf *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &bareudp_link_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	err = bareudp_configure(net, dev, conf);
	if (err) {
		/* Not registered yet: a plain free is sufficient */
		free_netdev(dev);
		return ERR_PTR(err);
	}
	err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;
err:
	/* Registered by bareudp_configure(): full unregister required */
	bareudp_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);
| 675 | |
/* Pernet init: set up this namespace's (initially empty) device list */
static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}
| 683 | |
/* Queue every bareudp device of @net on @head; the caller performs the
 * batched unregister_netdevice_many() under rtnl.
 */
static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}
| 692 | |
/* Batched pernet exit: collect the devices of all dying namespaces and
 * unregister them in a single rtnl section.
 */
static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, &list);

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
| 706 | |
/* Pernet hooks; .id/.size make the core allocate one struct bareudp_net
 * per namespace, retrievable via net_generic(net, bareudp_net_id).
 */
static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch = bareudp_exit_batch_net,
	.id = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};
| 713 | |
| 714 | static int __init bareudp_init_module(void) |
| 715 | { |
| 716 | int rc; |
| 717 | |
| 718 | rc = register_pernet_subsys(&bareudp_net_ops); |
| 719 | if (rc) |
| 720 | goto out1; |
| 721 | |
| 722 | rc = rtnl_link_register(&bareudp_link_ops); |
| 723 | if (rc) |
| 724 | goto out2; |
| 725 | |
| 726 | return 0; |
| 727 | out2: |
| 728 | unregister_pernet_subsys(&bareudp_net_ops); |
| 729 | out1: |
| 730 | return rc; |
| 731 | } |
| 732 | late_initcall(bareudp_init_module); |
| 733 | |
/* Module unload: unregister in reverse order of bareudp_init_module() */
static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);
| 740 | |
| 741 | MODULE_LICENSE("GPL"); |
| 742 | MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>"); |
| 743 | MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic"); |