/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

static struct sock *mroute_socket;


/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];		/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)

static int mroute_do_assert;				/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

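/*
 * Tear down the "dvmrp%d" IPIP tunnel that was created for a VIF: close the
 * device and ask the tunl0 fallback device to delete the tunnel via the
 * SIOCDELTUNNEL ioctl, issued from kernel context under KERNEL_DS.
 */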
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	dev_close(dev);

	dev = __dev_get_by_name(&init_net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

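/*
 * Create a "dvmrp%d" IPIP tunnel for a VIFF_TUNNEL vif by driving the tunl0
 * fallback device with SIOCADDTUNNEL, then mark the new device multicast
 * capable, relax rp_filter on it, open it and take a reference.
 * Returns the tunnel device, or NULL on failure.
 */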
static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(&init_net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else
			err = -EOPNOTSUPP;

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

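/*
 * PIM-SM register interface: "pimreg" is a pseudo device representing the
 * register VIF.  Anything transmitted on it is reported to the user space
 * daemon as an IGMPMSG_WHOLEPKT upcall and then dropped.
 */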
#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->hard_start_xmit	= reg_vif_xmit;
	dev->destructor		= free_netdev;
}

static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(0, "pimreg", reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(int vifi, int notify)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

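/*
 * Add a virtual interface, the counterpart of vif_delete().  Depending on
 * vifc_flags this binds the vif to the PIM register device, a fresh DVMRP
 * tunnel or an existing local device, and bumps MC_FORWARDING on it.
 * @mrtsock is true when the request comes from the mroute socket itself;
 * otherwise the vif is marked VIFF_STATIC.  Called under rtnl_lock.
 */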
static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case 0:
		dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

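/* Look up a resolved (origin, group) entry; the caller must hold mrt_lock. */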
static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
						  (u8 *)nlh);
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}

/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

		/*
		 *	Copy the IP header
		 */

		skb->network_header = skb->tail;
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb->dst = dst_clone(pkt->dst);

		/*
		 *	Add our header
		 */

		igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type	=
		msg->im_msgtype = assert;
		igmp->code	= 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i, 0);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0; i<MFC_LINES; i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

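/*
 * Destructor installed on the mroute socket by MRT_INIT (via ip_ra_control):
 * drops the global MC_FORWARDING count, clears mroute_socket and flushes all
 * non-static vifs and cache entries.
 */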
static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;

		write_lock_bh(&mrt_lock);
		mroute_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket = sk;
			write_unlock_bh(&mrt_lock);

			IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi, 0);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		mroute_do_assert=(v)?1:0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		v = (v) ? 1 : 0;

		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname!=MRT_PIM &&
#endif
	    optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mroute_do_pim;
#endif
	else
		val = mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}


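/*
 * Netdevice notifier: when a device in the initial namespace is unregistered,
 * delete any vif still pointing at it (notify=1 so vif_delete() does not try
 * to unregister the device a second time).
 */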
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct vif_device *v;
	int ct;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for (ct=0; ct<maxvif; ct++,v++) {
		if (v->dev == dev)
			vif_delete(ct, 1);
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

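/*
 * Final step of the NF_INET_FORWARD hook for forwarded multicasts: account
 * the datagram, replay IP options if present and hand the skb to dst_output().
 */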
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options * opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&init_net, &rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow sending ICMP here, so such packets will
		   disappear into a blackhole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program is joined on.
	 * If we do not do this, the program will have to join on all
	 * interfaces. On the other hand, a multihoming host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}

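/* Map a device back to its vif index (or -1); called with mrt_lock held. */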
static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

1313/* "local" means that we should preserve one skb (for local delivery) */
1314
1315static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
1316{
1317 int psend = -1;
1318 int vif, ct;
1319
1320 vif = cache->mfc_parent;
1321 cache->mfc_un.res.pkt++;
1322 cache->mfc_un.res.bytes += skb->len;
1323
1324 /*
1325 * Wrong interface: drop packet and (maybe) send PIM assert.
1326 */
1327 if (vif_table[vif].dev != skb->dev) {
1328 int true_vifi;
1329
Eric Dumazetee6b9672008-03-05 18:30:47 -08001330 if (skb->rtable->fl.iif == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 /* It is our own packet, looped back.
1332 Very complicated situation...
1333
1334 The best workaround until routing daemons will be
1335 fixed is not to redistribute packet, if it was
1336 send through wrong interface. It means, that
1337 multicast applications WILL NOT work for
1338 (S,G), which have default multicast route pointing
1339 to wrong oif. In any case, it is not a good
1340 idea to use multicasting applications on router.
1341 */
1342 goto dont_forward;
1343 }
1344
1345 cache->mfc_un.res.wrong_if++;
1346 true_vifi = ipmr_find_vif(skb->dev);
1347
1348 if (true_vifi >= 0 && mroute_do_assert &&
1349 /* pimsm uses asserts, when switching from RPT to SPT,
1350 so that we cannot check that packet arrived on an oif.
1351 It is bad, but otherwise we would need to move pretty
1352 large chunk of pimd to kernel. Ough... --ANK
1353 */
1354 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001355 time_after(jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1357 cache->mfc_un.res.last_assert = jiffies;
1358 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
1359 }
1360 goto dont_forward;
1361 }
1362
1363 vif_table[vif].pkt_in++;
Jianjun Kongc354e122008-11-03 00:28:02 -08001364 vif_table[vif].bytes_in += skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
1366 /*
1367 * Forward the frame
1368 */
1369 for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001370 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 if (psend != -1) {
1372 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1373 if (skb2)
1374 ipmr_queue_xmit(skb2, cache, psend);
1375 }
Jianjun Kongc354e122008-11-03 00:28:02 -08001376 psend = ct;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 }
1378 }
1379 if (psend != -1) {
1380 if (local) {
1381 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1382 if (skb2)
1383 ipmr_queue_xmit(skb2, cache, psend);
1384 } else {
1385 ipmr_queue_xmit(skb, cache, psend);
1386 return 0;
1387 }
1388 }
1389
1390dont_forward:
1391 if (!local)
1392 kfree_skb(skb);
1393 return 0;
1394}
1395
1396
/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = skb->rtable->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward, it should not be
	   forwarded a second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
			/* IGMPv1 (and broken IGMPv2 implementations such as
			   Cisco IOS <= 11.2(8)) do not put the router alert
			   option into IGMP packets destined to routable
			   groups. It is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff * skb)
{
	struct igmphdr *pim;
	struct iphdr   *encap;
	struct net_device  *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = igmp_hdr(skb);

	if (!mroute_do_pim ||
	    skb->len < sizeof(*pim) + sizeof(*encap) ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct igmphdr));
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif

1546#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct iphdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
	    (pim->flags&PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct pimreghdr));
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif

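/*
 * Fill a netlink route message for a resolved cache entry: RTA_IIF is
 * the incoming interface of the parent vif, and RTA_MULTIPATH carries
 * one nexthop per output vif with the TTL threshold in rtnh_hops.
 */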
static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif_table[c->mfc_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

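/*
 * Report a multicast route to the netlink caller.  If there is no
 * cache entry yet, the skb is cloned, a dummy IP header (version 0)
 * is pushed onto the clone and it is queued as an unresolved entry,
 * unless the caller asked not to wait.
 */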
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc_cache *cache;
	struct rtable *rt = skb->rtable;

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->version = 0;
		err = ipmr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);
		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}

#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip_mr_cache and /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	int ct;
};

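/* Return the pos'th in-use vif, or NULL when the table is exhausted. */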
static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif_table[iter->ct];
	}
	return NULL;
}

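/* The vif table is walked under mrt_lock, taken in ->start and dropped in ->stop. */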
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		return &vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

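/*
 * Iterator state for /proc/net/ip_mr_cache: the resolved cache is
 * walked first, then the unresolved queue.  'cache' records which
 * list (and therefore which lock) the iterator currently holds.
 */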
struct ipmr_mfc_iter {
	struct mfc_cache **cache;
	int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc_cache *mfc;

	it->cache = mfc_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
		for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

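/*
 * Advance the iterator; when the resolved cache is exhausted, drop
 * mrt_lock and continue on the unresolved queue under mfc_unres_lock.
 */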
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc_cache_array);

	while (++it->ct < MFC_LINES) {
		mfc = mfc_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc_cache_array)
		read_unlock(&mrt_lock);
}

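/* One line per (S,G) entry; resolved entries also list "oif:TTL" pairs. */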
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
			   (unsigned long) mfc->mfc_mcastgrp,
			   (unsigned long) mfc->mfc_origin,
			   mfc->mfc_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(n)
				    && mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
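/* Protocol handler for IPPROTO_PIM packets when PIM-SM v2 is compiled in. */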
static struct net_protocol pim_protocol = {
	.handler = pim_rcv,
};
#endif


/*
 *	Setup for IP multicast routing
 */

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;
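
/* Error unwind: undo the successful setup steps in reverse order. */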
#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(&init_net, "ip_mr_vif");
proc_vif_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
	return err;
}