/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans of and updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to use
     the neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by the rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  However, the same lock is used to protect other
   entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simple and must not call back
   into the neighbour tables.
 */
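
/*
 * Illustrative sketch (not used by this file): the pattern the rules above
 * imply when non-trivial work is needed on an entry found during a bucket
 * scan.  The helper name do_slow_work() is hypothetical.
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <find entry in its hash bucket>;
 *	if (n)
 *		neigh_hold(n);		// pin it; no callbacks under tbl->lock
 *	write_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_slow_work(n);	// may reach drivers/network safely now
 *		neigh_release(n);
 *	}
 */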

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * Returns a value drawn at random from the interval (1/2)*base ... (3/2)*base.
 * This corresponds to the default IPv6 settings and is not overridable,
 * because it is a genuinely reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
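
/*
 * Worked example for neigh_rand_reach_time() (illustration only): with
 * base = 30 * HZ the result is (base >> 1) + prandom_u32() % base, i.e.
 * uniform over [15 * HZ, 45 * HZ), which is the (1/2)*base ... (3/2)*base
 * interval described above.
 */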

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
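
/*
 * Illustrative numbers only (the actual thresholds are per-table and
 * tunable via sysctl): with gc_thresh2 = 512 and 600 entries currently on
 * the gc list, a forced run above may reclaim at most
 * max_clean = 600 - 512 = 88 unreferenced entries that are NUD_FAILED or
 * have not been updated within the last 5 seconds.
 */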

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
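
/*
 * Editorial note: neigh_carrier_down() and neigh_ifdown() differ only in
 * the skip_perm argument above -- a carrier loss keeps NUD_PERMANENT
 * entries in place, while a device shutdown flushes everything for the
 * device, permanent entries included.
 */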

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
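
/*
 * Sizing illustration (assuming 64-bit pointers and 4 KiB pages): a shift
 * of 9 gives 512 buckets * 8 bytes = 4096 bytes, which still fits the
 * kzalloc() path above, while a shift of 10 needs 8192 bytes and therefore
 * takes the __get_free_pages() path with get_order(8192) = 1.
 */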

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
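
/*
 * Illustrative caller sketch (not part of this file): neigh_lookup()
 * returns the entry with a reference already taken, so the caller only has
 * to drop it.  "key" is a placeholder for a protocol address in the
 * table's key format, e.g. a __be32 for arp_tbl.
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &key, dev);
 *
 *	if (n) {
 *		... inspect n->nud_state, n->ha, etc. under n->lock ...
 *		neigh_release(n);
 *	}
 */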

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
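
/*
 * Illustrative caller sketch (not part of this file): look up or create in
 * one step.  With want_ref == true the returned entry -- whether new or
 * pre-existing -- already holds a reference for the caller, and errors come
 * back as ERR_PTR().  "key" is again a placeholder protocol address.
 *
 *	struct neighbour *n = __neigh_create(&arp_tbl, &key, dev, true);
 *
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);
 *	... use n ...
 *	neigh_release(n);
 */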

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
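
/*
 * Worked example (illustration only): if the last four key bytes load as
 * the u32 value 0xc0a80001, the successive xor-folds above yield
 * 0xc0a8c0a9, 0xc0686869 and finally 0xcc6eeeef; the closing
 * "& PNEIGH_HASHMASK" keeps only the low four bits, selecting proxy
 * bucket 15 of the 16 available.
 */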

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
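
/*
 * Usage note (editorial): a lookup-only call (creat == 0) just scans the
 * proxy buckets under tbl->lock, whereas creat != 0 allocates with
 * GFP_KERNEL and hits ASSERT_RTNL(), so creation must be done from process
 * context with the RTNL held.
 */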

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -0700888{
Eric Dumazete4c4e442009-07-30 03:15:07 +0000889 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
Eric Dumazet767e97e2010-10-06 17:49:21 -0700890 struct neighbour *n;
891 struct neighbour __rcu **np;
Eric Dumazete4c4e442009-07-30 03:15:07 +0000892 unsigned int i;
Eric Dumazetd6bf7812010-10-04 06:15:44 +0000893 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894
895 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
896
Eric Dumazete4c4e442009-07-30 03:15:07 +0000897 write_lock_bh(&tbl->lock);
Eric Dumazetd6bf7812010-10-04 06:15:44 +0000898 nht = rcu_dereference_protected(tbl->nht,
899 lockdep_is_held(&tbl->lock));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900
901 /*
902 * periodically recompute ReachableTime from random function
903 */
904
Eric Dumazete4c4e442009-07-30 03:15:07 +0000905 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906 struct neigh_parms *p;
Eric Dumazete4c4e442009-07-30 03:15:07 +0000907 tbl->last_rand = jiffies;
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +0100908 list_for_each_entry(p, &tbl->parms_list, list)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 p->reachable_time =
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100910 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 }
912
Duan Jiongfeff9ab2014-02-27 17:14:41 +0800913 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
914 goto out;
915
David S. Millercd089332011-07-11 01:28:12 -0700916 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
Eric Dumazetd6bf7812010-10-04 06:15:44 +0000917 np = &nht->hash_buckets[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918
Eric Dumazet767e97e2010-10-06 17:49:21 -0700919 while ((n = rcu_dereference_protected(*np,
920 lockdep_is_held(&tbl->lock))) != NULL) {
Eric Dumazete4c4e442009-07-30 03:15:07 +0000921 unsigned int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
Eric Dumazete4c4e442009-07-30 03:15:07 +0000923 write_lock(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924
Eric Dumazete4c4e442009-07-30 03:15:07 +0000925 state = n->nud_state;
Roopa Prabhu9ce33e42018-04-24 13:49:34 -0700926 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
927 (n->flags & NTF_EXT_LEARNED)) {
Eric Dumazete4c4e442009-07-30 03:15:07 +0000928 write_unlock(&n->lock);
929 goto next_elt;
930 }
931
932 if (time_before(n->used, n->confirmed))
933 n->used = n->confirmed;
934
Reshetova, Elena9f237432017-06-30 13:07:55 +0300935 if (refcount_read(&n->refcnt) == 1 &&
Eric Dumazete4c4e442009-07-30 03:15:07 +0000936 (state == NUD_FAILED ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100937 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
Eric Dumazete4c4e442009-07-30 03:15:07 +0000938 *np = n->next;
David Ahern58956312018-12-07 12:24:57 -0800939 neigh_mark_dead(n);
Eric Dumazete4c4e442009-07-30 03:15:07 +0000940 write_unlock(&n->lock);
941 neigh_cleanup_and_release(n);
942 continue;
943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944 write_unlock(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945
946next_elt:
Eric Dumazete4c4e442009-07-30 03:15:07 +0000947 np = &n->next;
948 }
949 /*
950 * It's fine to release lock here, even if hash table
951 * grows while we are preempted.
952 */
953 write_unlock_bh(&tbl->lock);
954 cond_resched();
955 write_lock_bh(&tbl->lock);
Michel Machado84338a62012-02-21 16:04:13 -0500956 nht = rcu_dereference_protected(tbl->nht,
957 lockdep_is_held(&tbl->lock));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 }
YOSHIFUJI Hideaki / 吉藤英明27246802013-01-22 05:20:05 +0000959out:
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100960 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
961 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
962 * BASE_REACHABLE_TIME.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963 */
viresh kumarf6180022014-01-22 12:23:33 +0530964 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100965 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
Eric Dumazete4c4e442009-07-30 03:15:07 +0000966 write_unlock_bh(&tbl->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
}
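
/*
 * Worked example (assuming the common ARP defaults of ucast_probes = 3,
 * app_probes = 0, mcast_probes = 3 and mcast_reprobes = 0):
 * neigh_max_probes() evaluates to 3 + 0 + 3 = 6 for an entry that is not
 * in NUD_PROBE, and to 3 + 0 + 0 = 3 once the entry has entered NUD_PROBE.
 */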

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very delicate spot. report_unreachable is a very
	   complicated routine. In particular, it can end up hitting
	   this same neighbour entry!

	   So we try to be careful here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
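
/*
 * Editorial summary of the timer-driven transitions handled above
 * (the usual NUD state machine, simplified):
 *
 *   REACHABLE --(reachable_time expired, not recently used)--> STALE
 *   REACHABLE --(expired but used within DELAY_PROBE_TIME)----> DELAY
 *   DELAY     --(confirmed within DELAY_PROBE_TIME)-----------> REACHABLE
 *   DELAY     --(not confirmed)-------------------------------> PROBE
 *   INCOMPLETE/PROBE --(probes >= neigh_max_probes())---------> FAILED
 */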

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
1205
1206
1207/* Generic update routine.
1208 -- lladdr is the new link-layer address, or NULL if none is supplied.
1209 -- new is the new NUD state.
1210 -- flags
1211 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1212 if it is different.
1213 NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001214 lladdr as suspect instead of overriding it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 if it is different.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1217
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001218 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 NTF_ROUTER flag.
1220 NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known to be
1221 a router.
1222
1223 Caller MUST hold reference count on the entry.
1224 */
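/*
 * Illustrative sketch (not taken from a specific caller): a protocol that
 * has just validated a reply carrying the sender's link-layer address "ha"
 * could confirm the entry with
 *
 *	neigh_update(neigh, ha, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE, 0);
 *
 * while an administrative change (e.g. from netlink, as in neigh_add() below)
 * would also pass NEIGH_UPDATE_F_ADMIN so that NUD_NOARP/NUD_PERMANENT
 * entries may be modified.
 */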
1225
David Ahern7a35a502018-12-05 20:02:29 -08001226static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1227 u8 new, u32 flags, u32 nlmsg_pid,
1228 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229{
David Aherne997f8a2018-12-11 18:57:25 -07001230 bool ext_learn_change = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 u8 old;
1232 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 int notify = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 struct net_device *dev;
1235 int update_isrouter = 0;
1236
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001237 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1238
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 write_lock_bh(&neigh->lock);
1240
1241 dev = neigh->dev;
1242 old = neigh->nud_state;
1243 err = -EPERM;
1244
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001245 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 (old & (NUD_NOARP | NUD_PERMANENT)))
1247 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001248 if (neigh->dead) {
1249 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
Julian Anastasov2c51a972015-06-16 22:56:39 +03001250 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001251 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
David Aherne997f8a2018-12-11 18:57:25 -07001253 ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
Roopa Prabhu9ce33e42018-04-24 13:49:34 -07001254
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 if (!(new & NUD_VALID)) {
1256 neigh_del_timer(neigh);
1257 if (old & NUD_CONNECTED)
1258 neigh_suspect(neigh);
David Ahern9c29a2f2018-12-11 18:57:21 -07001259 neigh->nud_state = new;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 notify = old & NUD_VALID;
Roopa Prabhud2fb4fb2018-10-20 18:09:31 -07001262 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
Timo Teras5ef12d92009-06-11 04:16:28 -07001263 (new & NUD_FAILED)) {
1264 neigh_invalidate(neigh);
1265 notify = 1;
1266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 goto out;
1268 }
1269
1270 /* Compare new lladdr with cached one */
1271 if (!dev->addr_len) {
1272 /* First case: device needs no address. */
1273 lladdr = neigh->ha;
1274 } else if (lladdr) {
1275 /* The second case: if something is already cached
1276 and a new address is proposed:
1277 - compare new & old
1278 - if they are different, check override flag
1279 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001280 if ((old & NUD_VALID) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 !memcmp(lladdr, neigh->ha, dev->addr_len))
1282 lladdr = neigh->ha;
1283 } else {
1284 /* No address is supplied; if we know something,
1285 use it, otherwise discard the request.
1286 */
1287 err = -EINVAL;
David Ahern7a35a502018-12-05 20:02:29 -08001288 if (!(old & NUD_VALID)) {
1289 NL_SET_ERR_MSG(extack, "No link layer address given");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001291 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 lladdr = neigh->ha;
1293 }
1294
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001295 /* Update the confirmed timestamp for the neighbour entry after we
1296 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1297 */
1298 if (new & NUD_CONNECTED)
1299 neigh->confirmed = jiffies;
1300
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 /* If the entry was valid and the address has not changed,
1302 do not change the entry state if the new one is STALE.
1303 */
1304 err = 0;
1305 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1306 if (old & NUD_VALID) {
1307 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1308 update_isrouter = 0;
1309 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1310 (old & NUD_CONNECTED)) {
1311 lladdr = neigh->ha;
1312 new = NUD_STALE;
1313 } else
1314 goto out;
1315 } else {
Julian Anastasov0e7bbcc2016-07-27 09:56:50 +03001316 if (lladdr == neigh->ha && new == NUD_STALE &&
1317 !(flags & NEIGH_UPDATE_F_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 new = old;
1319 }
1320 }
1321
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001322 /* Update timestamp only once we know we will make a change to the
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001323 * neighbour entry. Otherwise we risk moving the locktime window with
1324 * no-op updates and ignoring relevant ARP updates.
1325 */
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001326 if (new != old || lladdr != neigh->ha)
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001327 neigh->updated = jiffies;
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 if (new != old) {
1330 neigh_del_timer(neigh);
Erik Kline765c9c62015-05-18 19:44:41 +09001331 if (new & NUD_PROBE)
1332 atomic_set(&neigh->probes, 0);
Pavel Emelyanova43d8992007-12-20 15:49:05 -08001333 if (new & NUD_IN_TIMER)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001334 neigh_add_timer(neigh, (jiffies +
1335 ((new & NUD_REACHABLE) ?
David S. Miller667347f2005-09-27 12:07:44 -07001336 neigh->parms->reachable_time :
1337 0)));
David Ahern9c29a2f2018-12-11 18:57:21 -07001338 neigh->nud_state = new;
Bob Gilligan53385d22013-12-15 13:39:56 -08001339 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 }
1341
1342 if (lladdr != neigh->ha) {
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001343 write_seqlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 memcpy(&neigh->ha, lladdr, dev->addr_len);
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001345 write_sequnlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 neigh_update_hhs(neigh);
1347 if (!(new & NUD_CONNECTED))
1348 neigh->confirmed = jiffies -
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001349 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 }
1352 if (new == old)
1353 goto out;
1354 if (new & NUD_CONNECTED)
1355 neigh_connect(neigh);
1356 else
1357 neigh_suspect(neigh);
1358 if (!(old & NUD_VALID)) {
1359 struct sk_buff *skb;
1360
1361 /* Again: avoid an endless loop if something went wrong */
1362
1363 while (neigh->nud_state & NUD_VALID &&
1364 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
David S. Miller69cce1d2011-07-17 23:09:49 -07001365 struct dst_entry *dst = skb_dst(skb);
1366 struct neighbour *n2, *n1 = neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 write_unlock_bh(&neigh->lock);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001368
1369 rcu_read_lock();
David S. Miller13a43d92012-07-02 22:15:37 -07001370
1371 /* Why not just use 'neigh' as-is? The problem is that
1372 * things such as shaper, eql, and sch_teql can end up
1373 * using alternative neigh objects to output
1374 * the packet in the output path. So what we need to do
1375 * here is re-lookup the top-level neigh in the path so
1376 * we can reinject the packet there.
1377 */
1378 n2 = NULL;
1379 if (dst) {
1380 n2 = dst_neigh_lookup_skb(dst, skb);
1381 if (n2)
1382 n1 = n2;
1383 }
David S. Miller8f40b162011-07-17 13:34:11 -07001384 n1->output(n1, skb);
David S. Miller13a43d92012-07-02 22:15:37 -07001385 if (n2)
1386 neigh_release(n2);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001387 rcu_read_unlock();
1388
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 write_lock_bh(&neigh->lock);
1390 }
Eric Dumazetc9ab4d82013-06-28 02:37:42 -07001391 __skb_queue_purge(&neigh->arp_queue);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001392 neigh->arp_queue_len_bytes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 }
1394out:
Roopa Prabhufc6e8072018-09-22 21:26:20 -07001395 if (update_isrouter)
1396 neigh_update_is_router(neigh, flags, &notify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 write_unlock_bh(&neigh->lock);
Tom Tucker8d717402006-07-30 20:43:36 -07001398
David Aherne997f8a2018-12-11 18:57:25 -07001399 if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
David Ahern9c29a2f2018-12-11 18:57:21 -07001400 neigh_update_gc_list(neigh);
1401
Tom Tucker8d717402006-07-30 20:43:36 -07001402 if (notify)
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001403 neigh_update_notify(neigh, nlmsg_pid);
Thomas Grafd961db32007-08-08 23:12:56 -07001404
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001405 trace_neigh_update_done(neigh, err);
1406
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 return err;
1408}
David Ahern7a35a502018-12-05 20:02:29 -08001409
1410int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1411 u32 flags, u32 nlmsg_pid)
1412{
1413 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1414}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001415EXPORT_SYMBOL(neigh_update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
Jiri Benc7e980562013-12-11 13:48:20 +01001417/* Update the neigh to listen temporarily for probe responses, even if it is
1418 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1419 */
1420void __neigh_set_probe_once(struct neighbour *neigh)
1421{
Julian Anastasov2c51a972015-06-16 22:56:39 +03001422 if (neigh->dead)
1423 return;
Jiri Benc7e980562013-12-11 13:48:20 +01001424 neigh->updated = jiffies;
1425 if (!(neigh->nud_state & NUD_FAILED))
1426 return;
Duan Jiong2176d5d2014-05-09 13:16:48 +08001427 neigh->nud_state = NUD_INCOMPLETE;
1428 atomic_set(&neigh->probes, neigh_max_probes(neigh));
Jiri Benc7e980562013-12-11 13:48:20 +01001429 neigh_add_timer(neigh,
1430 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1431}
1432EXPORT_SYMBOL(__neigh_set_probe_once);
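/*
 * Sketch of the expected calling pattern (assumed, not copied from a
 * particular user): the neighbour lock must be write-held around the call,
 * and the caller then transmits its own probe packet.
 *
 *	write_lock_bh(&neigh->lock);
 *	__neigh_set_probe_once(neigh);
 *	write_unlock_bh(&neigh->lock);
 *
 * followed by transmission of a unicast probe for neigh->primary_key.
 */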
1433
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1435 u8 *lladdr, void *saddr,
1436 struct net_device *dev)
1437{
1438 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1439 lladdr || !dev->addr_len);
1440 if (neigh)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001441 neigh_update(neigh, lladdr, NUD_STALE,
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001442 NEIGH_UPDATE_F_OVERRIDE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 return neigh;
1444}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001445EXPORT_SYMBOL(neigh_event_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
Eric Dumazet34d101d2010-10-11 09:16:57 -07001447/* Takes write_lock_bh(&n->lock) itself; must be called without n->lock held. */
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001448static void neigh_hh_init(struct neighbour *n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449{
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001450 struct net_device *dev = n->dev;
1451 __be16 prot = n->tbl->protocol;
David S. Millerf6b72b622011-07-14 07:53:20 -07001452 struct hh_cache *hh = &n->hh;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001453
1454 write_lock_bh(&n->lock);
Eric Dumazet34d101d2010-10-11 09:16:57 -07001455
David S. Millerf6b72b622011-07-14 07:53:20 -07001456 /* Only one thread can come in here and initialize the
1457 * hh_cache entry.
1458 */
David S. Millerb23b5452011-07-16 17:45:02 -07001459 if (!hh->hh_len)
1460 dev->header_ops->cache(n, hh, prot);
David S. Millerf6b72b622011-07-14 07:53:20 -07001461
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001462 write_unlock_bh(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463}
1464
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465/* Slow and careful. */
1466
David S. Miller8f40b162011-07-17 13:34:11 -07001467int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 int rc = 0;
1470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 if (!neigh_event_send(neigh, skb)) {
1472 int err;
1473 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001474 unsigned int seq;
Eric Dumazet34d101d2010-10-11 09:16:57 -07001475
David S. Millerf6b72b622011-07-14 07:53:20 -07001476 if (dev->header_ops->cache && !neigh->hh.hh_len)
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001477 neigh_hh_init(neigh);
Eric Dumazet34d101d2010-10-11 09:16:57 -07001478
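		/* Snapshot neigh->ha under the ha_lock seqlock and rebuild the
		 * header if a concurrent update changed the address meanwhile.
		 */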
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001479 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001480 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001481 seq = read_seqbegin(&neigh->ha_lock);
1482 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1483 neigh->ha, NULL, skb->len);
1484 } while (read_seqretry(&neigh->ha_lock, seq));
Eric Dumazet34d101d2010-10-11 09:16:57 -07001485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001487 rc = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 else
1489 goto out_kfree_skb;
1490 }
1491out:
1492 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493out_kfree_skb:
1494 rc = -EINVAL;
1495 kfree_skb(skb);
1496 goto out;
1497}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001498EXPORT_SYMBOL(neigh_resolve_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
1500/* As fast as possible without hh cache */
1501
David S. Miller8f40b162011-07-17 13:34:11 -07001502int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001505 unsigned int seq;
David S. Miller8f40b162011-07-17 13:34:11 -07001506 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001508 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001509 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001510 seq = read_seqbegin(&neigh->ha_lock);
1511 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1512 neigh->ha, NULL, skb->len);
1513 } while (read_seqretry(&neigh->ha_lock, seq));
1514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001516 err = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 else {
1518 err = -EINVAL;
1519 kfree_skb(skb);
1520 }
1521 return err;
1522}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001523EXPORT_SYMBOL(neigh_connected_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
David S. Miller8f40b162011-07-17 13:34:11 -07001525int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1526{
1527 return dev_queue_xmit(skb);
1528}
1529EXPORT_SYMBOL(neigh_direct_output);
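/*
 * Summary of the three output paths above (the switching code lives in
 * neigh_suspect()/neigh_connect() earlier in this file):
 *
 *	neigh_suspect(n):  n->output = n->ops->output;           e.g. neigh_resolve_output()
 *	neigh_connect(n):  n->output = n->ops->connected_output; e.g. neigh_connected_output()
 *
 * neigh_direct_output() is for tables/devices that need no resolution at
 * all and simply hand the skb to dev_queue_xmit().
 */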
1530
Kees Cooke99e88a2017-10-16 14:43:17 -07001531static void neigh_proxy_process(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532{
Kees Cooke99e88a2017-10-16 14:43:17 -07001533 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 long sched_next = 0;
1535 unsigned long now = jiffies;
David S. Millerf72051b2008-09-23 01:11:18 -07001536 struct sk_buff *skb, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
1538 spin_lock(&tbl->proxy_queue.lock);
1539
David S. Millerf72051b2008-09-23 01:11:18 -07001540 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1541 long tdif = NEIGH_CB(skb)->sched_next - now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 if (tdif <= 0) {
David S. Millerf72051b2008-09-23 01:11:18 -07001544 struct net_device *dev = skb->dev;
Eric Dumazet20e60742011-08-22 19:32:42 +00001545
David S. Millerf72051b2008-09-23 01:11:18 -07001546 __skb_unlink(skb, &tbl->proxy_queue);
Eric Dumazet20e60742011-08-22 19:32:42 +00001547 if (tbl->proxy_redo && netif_running(dev)) {
1548 rcu_read_lock();
David S. Millerf72051b2008-09-23 01:11:18 -07001549 tbl->proxy_redo(skb);
Eric Dumazet20e60742011-08-22 19:32:42 +00001550 rcu_read_unlock();
1551 } else {
David S. Millerf72051b2008-09-23 01:11:18 -07001552 kfree_skb(skb);
Eric Dumazet20e60742011-08-22 19:32:42 +00001553 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
1555 dev_put(dev);
1556 } else if (!sched_next || tdif < sched_next)
1557 sched_next = tdif;
1558 }
1559 del_timer(&tbl->proxy_timer);
1560 if (sched_next)
1561 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1562 spin_unlock(&tbl->proxy_queue.lock);
1563}
1564
1565void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1566 struct sk_buff *skb)
1567{
1568 unsigned long now = jiffies;
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -05001569
1570 unsigned long sched_next = now + (prandom_u32() %
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001571 NEIGH_VAR(p, PROXY_DELAY));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001573 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 kfree_skb(skb);
1575 return;
1576 }
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001577
1578 NEIGH_CB(skb)->sched_next = sched_next;
1579 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
1581 spin_lock(&tbl->proxy_queue.lock);
1582 if (del_timer(&tbl->proxy_timer)) {
1583 if (time_before(tbl->proxy_timer.expires, sched_next))
1584 sched_next = tbl->proxy_timer.expires;
1585 }
Eric Dumazetadf30902009-06-02 05:19:30 +00001586 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 dev_hold(skb->dev);
1588 __skb_queue_tail(&tbl->proxy_queue, skb);
1589 mod_timer(&tbl->proxy_timer, sched_next);
1590 spin_unlock(&tbl->proxy_queue.lock);
1591}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001592EXPORT_SYMBOL(pneigh_enqueue);
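/*
 * Example of the randomised delay computed above, assuming the usual ARP
 * default of proxy_delay = (8 * HZ) / 10: each queued proxy request is
 * answered after a uniform delay in [0, 0.8s), so that multiple proxies on
 * the same segment do not all reply at once.
 */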
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07001594static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
Eric W. Biederman426b5302008-01-24 00:13:18 -08001595 struct net *net, int ifindex)
1596{
1597 struct neigh_parms *p;
1598
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001599 list_for_each_entry(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001600 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
Gao feng170d6f92013-06-20 10:01:33 +08001601 (!p->dev && !ifindex && net_eq(net, &init_net)))
Eric W. Biederman426b5302008-01-24 00:13:18 -08001602 return p;
1603 }
1604
1605 return NULL;
1606}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
1608struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1609 struct neigh_table *tbl)
1610{
Gao fengcf89d6b2013-06-20 10:01:32 +08001611 struct neigh_parms *p;
Stephen Hemminger00829822008-11-20 20:14:53 -08001612 struct net *net = dev_net(dev);
1613 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
Gao fengcf89d6b2013-06-20 10:01:32 +08001615 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 if (p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 p->tbl = tbl;
Reshetova, Elena63439442017-06-30 13:07:56 +03001618 refcount_set(&p->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 p->reachable_time =
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001620 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Denis V. Lunev486b51d2008-01-14 22:59:59 -08001621 dev_hold(dev);
1622 p->dev = dev;
Eric W. Biedermanefd7ef12015-03-11 23:04:08 -05001623 write_pnet(&p->net, net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 p->sysctl_table = NULL;
Veaceslav Falico63134802013-08-02 19:07:38 +02001625
1626 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
Veaceslav Falico63134802013-08-02 19:07:38 +02001627 dev_put(dev);
1628 kfree(p);
1629 return NULL;
1630 }
1631
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 write_lock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001633 list_add(&p->list, &tbl->parms.list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 write_unlock_bh(&tbl->lock);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01001635
1636 neigh_parms_data_state_cleanall(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 }
1638 return p;
1639}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001640EXPORT_SYMBOL(neigh_parms_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
1642static void neigh_rcu_free_parms(struct rcu_head *head)
1643{
1644 struct neigh_parms *parms =
1645 container_of(head, struct neigh_parms, rcu_head);
1646
1647 neigh_parms_put(parms);
1648}
1649
1650void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1651{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 if (!parms || parms == &tbl->parms)
1653 return;
1654 write_lock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001655 list_del(&parms->list);
1656 parms->dead = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 write_unlock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001658 if (parms->dev)
1659 dev_put(parms->dev);
1660 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001662EXPORT_SYMBOL(neigh_parms_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663
Denis V. Lunev06f05112008-01-24 00:30:58 -08001664static void neigh_parms_destroy(struct neigh_parms *parms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
1666 kfree(parms);
1667}
1668
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001669static struct lock_class_key neigh_table_proxy_queue_class;
1670
WANG Congd7480fd2014-11-10 15:59:36 -08001671static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1672
1673void neigh_table_init(int index, struct neigh_table *tbl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674{
1675 unsigned long now = jiffies;
1676 unsigned long phsize;
1677
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001678 INIT_LIST_HEAD(&tbl->parms_list);
David Ahern58956312018-12-07 12:24:57 -08001679 INIT_LIST_HEAD(&tbl->gc_list);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001680 list_add(&tbl->parms.list, &tbl->parms_list);
Eric Dumazete42ea982008-11-12 00:54:54 -08001681 write_pnet(&tbl->parms.net, &init_net);
Reshetova, Elena63439442017-06-30 13:07:56 +03001682 refcount_set(&tbl->parms.refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 tbl->parms.reachable_time =
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001684 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 tbl->stats = alloc_percpu(struct neigh_statistics);
1687 if (!tbl->stats)
1688 panic("cannot create neighbour cache statistics");
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690#ifdef CONFIG_PROC_FS
Christoph Hellwig71a50532018-04-15 10:16:41 +02001691 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1692 &neigh_stat_seq_ops, tbl))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 panic("cannot create neighbour proc dir entry");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694#endif
1695
David S. Millercd089332011-07-11 01:28:12 -07001696 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
1698 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
Andrew Morton77d04bd2006-04-07 14:52:59 -07001699 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001701 if (!tbl->nht || !tbl->phash_buckets)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 panic("cannot allocate neighbour cache hashes");
1703
YOSHIFUJI Hideaki / 吉藤英明08433ef2013-01-24 00:44:23 +00001704 if (!tbl->entry_size)
1705 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1706 tbl->key_len, NEIGH_PRIV_ALIGN);
1707 else
1708 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1709
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 rwlock_init(&tbl->lock);
Tejun Heo203b42f2012-08-21 13:18:23 -07001711 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
viresh kumarf6180022014-01-22 12:23:33 +05301712 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1713 tbl->parms.reachable_time);
Kees Cooke99e88a2017-10-16 14:43:17 -07001714 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001715 skb_queue_head_init_class(&tbl->proxy_queue,
1716 &neigh_table_proxy_queue_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 tbl->last_flush = now;
1719 tbl->last_rand = now + tbl->parms.reachable_time * 20;
Simon Kelleybd89efc2006-05-12 14:56:08 -07001720
WANG Congd7480fd2014-11-10 15:59:36 -08001721 neigh_tables[index] = tbl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001723EXPORT_SYMBOL(neigh_table_init);
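/*
 * Typical registration, sketched from the IPv4 ARP init path:
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * The index selects the slot in neigh_tables[] that neigh_find_table()
 * below resolves from an address family.
 */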
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
WANG Congd7480fd2014-11-10 15:59:36 -08001725int neigh_table_clear(int index, struct neigh_table *tbl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726{
WANG Congd7480fd2014-11-10 15:59:36 -08001727 neigh_tables[index] = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 /* This is not clean... fix it so the IPv6 module can be unloaded safely */
Tejun Heoa5c30b32010-10-19 06:04:42 +00001729 cancel_delayed_work_sync(&tbl->gc_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 del_timer_sync(&tbl->proxy_timer);
1731 pneigh_queue_purge(&tbl->proxy_queue);
1732 neigh_ifdown(tbl, NULL);
1733 if (atomic_read(&tbl->entries))
Joe Perchese005d192012-05-16 19:58:40 +00001734 pr_crit("neighbour leakage\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
Eric Dumazet6193d2b2011-01-19 22:02:47 +00001736 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1737 neigh_hash_free_rcu);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001738 tbl->nht = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
1740 kfree(tbl->phash_buckets);
1741 tbl->phash_buckets = NULL;
1742
Alexey Dobriyan3f192b52007-11-05 21:28:13 -08001743 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1744
Kirill Korotaev3fcde742006-09-01 01:34:10 -07001745 free_percpu(tbl->stats);
1746 tbl->stats = NULL;
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 return 0;
1749}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001750EXPORT_SYMBOL(neigh_table_clear);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
WANG Congd7480fd2014-11-10 15:59:36 -08001752static struct neigh_table *neigh_find_table(int family)
1753{
1754 struct neigh_table *tbl = NULL;
1755
1756 switch (family) {
1757 case AF_INET:
1758 tbl = neigh_tables[NEIGH_ARP_TABLE];
1759 break;
1760 case AF_INET6:
1761 tbl = neigh_tables[NEIGH_ND_TABLE];
1762 break;
1763 case AF_DECnet:
1764 tbl = neigh_tables[NEIGH_DN_TABLE];
1765 break;
1766 }
1767
1768 return tbl;
1769}
1770
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08001771const struct nla_policy nda_policy[NDA_MAX+1] = {
1772 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1773 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1774 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1775 [NDA_PROBES] = { .type = NLA_U32 },
1776 [NDA_VLAN] = { .type = NLA_U16 },
1777 [NDA_PORT] = { .type = NLA_U16 },
1778 [NDA_VNI] = { .type = NLA_U32 },
1779 [NDA_IFINDEX] = { .type = NLA_U32 },
1780 [NDA_MASTER] = { .type = NLA_U32 },
David Aherna9cd3432018-12-19 20:02:36 -08001781 [NDA_PROTOCOL] = { .type = NLA_U8 },
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08001782};
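/*
 * nlmsg_parse() in neigh_add() below validates attributes against this
 * policy, so e.g. an NDA_DST payload longer than MAX_ADDR_LEN is rejected
 * before the per-table key_len check runs.
 */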
1783
David Ahernc21ef3e2017-04-16 09:48:24 -07001784static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1785 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001787 struct net *net = sock_net(skb->sk);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001788 struct ndmsg *ndm;
1789 struct nlattr *dst_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 struct neigh_table *tbl;
WANG Congd7480fd2014-11-10 15:59:36 -08001791 struct neighbour *neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 struct net_device *dev = NULL;
Thomas Grafa14a49d2006-08-07 17:53:08 -07001793 int err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
Eric Dumazet110b2492010-10-04 04:27:36 +00001795 ASSERT_RTNL();
Thomas Grafa14a49d2006-08-07 17:53:08 -07001796 if (nlmsg_len(nlh) < sizeof(*ndm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 goto out;
1798
Thomas Grafa14a49d2006-08-07 17:53:08 -07001799 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
David Ahern7a35a502018-12-05 20:02:29 -08001800 if (!dst_attr) {
1801 NL_SET_ERR_MSG(extack, "Network address not specified");
Thomas Grafa14a49d2006-08-07 17:53:08 -07001802 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001803 }
Thomas Grafa14a49d2006-08-07 17:53:08 -07001804
1805 ndm = nlmsg_data(nlh);
1806 if (ndm->ndm_ifindex) {
Eric Dumazet110b2492010-10-04 04:27:36 +00001807 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001808 if (dev == NULL) {
1809 err = -ENODEV;
1810 goto out;
1811 }
1812 }
1813
WANG Congd7480fd2014-11-10 15:59:36 -08001814 tbl = neigh_find_table(ndm->ndm_family);
1815 if (tbl == NULL)
1816 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
David Ahern7a35a502018-12-05 20:02:29 -08001818 if (nla_len(dst_attr) < (int)tbl->key_len) {
1819 NL_SET_ERR_MSG(extack, "Invalid network address");
WANG Congd7480fd2014-11-10 15:59:36 -08001820 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
WANG Congd7480fd2014-11-10 15:59:36 -08001823 if (ndm->ndm_flags & NTF_PROXY) {
1824 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
Eric Dumazet110b2492010-10-04 04:27:36 +00001825 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 }
WANG Congd7480fd2014-11-10 15:59:36 -08001827
1828 if (dev == NULL)
1829 goto out;
1830
1831 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1832 if (neigh == NULL) {
1833 err = -ENOENT;
1834 goto out;
1835 }
1836
David Ahern7a35a502018-12-05 20:02:29 -08001837 err = __neigh_update(neigh, NULL, NUD_FAILED,
1838 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1839 NETLINK_CB(skb).portid, extack);
Sowmini Varadhan50710342017-06-02 09:01:49 -07001840 write_lock_bh(&tbl->lock);
WANG Congd7480fd2014-11-10 15:59:36 -08001841 neigh_release(neigh);
Sowmini Varadhan50710342017-06-02 09:01:49 -07001842 neigh_remove_one(neigh, tbl);
1843 write_unlock_bh(&tbl->lock);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001844
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845out:
1846 return err;
1847}
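/*
 * Userspace reaches the handler above via RTM_DELNEIGH; an illustrative
 * iproute2 equivalent (addresses assumed) is
 *
 *	ip neigh del 192.0.2.1 dev eth0
 *
 * which becomes __neigh_update(..., NUD_FAILED, OVERRIDE | ADMIN, ...)
 * followed by neigh_remove_one().
 */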
1848
David Ahernc21ef3e2017-04-16 09:48:24 -07001849static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1850 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851{
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07001852 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1853 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001854 struct net *net = sock_net(skb->sk);
Thomas Graf5208deb2006-08-07 17:55:40 -07001855 struct ndmsg *ndm;
1856 struct nlattr *tb[NDA_MAX+1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 struct neigh_table *tbl;
1858 struct net_device *dev = NULL;
WANG Congd7480fd2014-11-10 15:59:36 -08001859 struct neighbour *neigh;
1860 void *dst, *lladdr;
David Aherndf9b0e32018-12-15 14:09:06 -08001861 u8 protocol = 0;
Thomas Graf5208deb2006-08-07 17:55:40 -07001862 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Eric Dumazet110b2492010-10-04 04:27:36 +00001864 ASSERT_RTNL();
David Aherna9cd3432018-12-19 20:02:36 -08001865 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nda_policy, extack);
Thomas Graf5208deb2006-08-07 17:55:40 -07001866 if (err < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 goto out;
1868
Thomas Graf5208deb2006-08-07 17:55:40 -07001869 err = -EINVAL;
David Ahern7a35a502018-12-05 20:02:29 -08001870 if (!tb[NDA_DST]) {
1871 NL_SET_ERR_MSG(extack, "Network address not specified");
Thomas Graf5208deb2006-08-07 17:55:40 -07001872 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001873 }
Thomas Graf5208deb2006-08-07 17:55:40 -07001874
1875 ndm = nlmsg_data(nlh);
1876 if (ndm->ndm_ifindex) {
Eric Dumazet110b2492010-10-04 04:27:36 +00001877 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
Thomas Graf5208deb2006-08-07 17:55:40 -07001878 if (dev == NULL) {
1879 err = -ENODEV;
1880 goto out;
1881 }
1882
David Ahern7a35a502018-12-05 20:02:29 -08001883 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1884 NL_SET_ERR_MSG(extack, "Invalid link address");
Eric Dumazet110b2492010-10-04 04:27:36 +00001885 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001886 }
Thomas Graf5208deb2006-08-07 17:55:40 -07001887 }
1888
WANG Congd7480fd2014-11-10 15:59:36 -08001889 tbl = neigh_find_table(ndm->ndm_family);
1890 if (tbl == NULL)
1891 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892
David Ahern7a35a502018-12-05 20:02:29 -08001893 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1894 NL_SET_ERR_MSG(extack, "Invalid network address");
WANG Congd7480fd2014-11-10 15:59:36 -08001895 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001896 }
1897
WANG Congd7480fd2014-11-10 15:59:36 -08001898 dst = nla_data(tb[NDA_DST]);
1899 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
David Aherna9cd3432018-12-19 20:02:36 -08001901 if (tb[NDA_PROTOCOL])
David Aherndf9b0e32018-12-15 14:09:06 -08001902 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
David Aherndf9b0e32018-12-15 14:09:06 -08001903
WANG Congd7480fd2014-11-10 15:59:36 -08001904 if (ndm->ndm_flags & NTF_PROXY) {
1905 struct pneigh_entry *pn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
WANG Congd7480fd2014-11-10 15:59:36 -08001907 err = -ENOBUFS;
1908 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1909 if (pn) {
1910 pn->flags = ndm->ndm_flags;
David Aherndf9b0e32018-12-15 14:09:06 -08001911 if (protocol)
1912 pn->protocol = protocol;
Eric Biederman0c5c2d32009-03-04 00:03:08 -08001913 err = 0;
WANG Congd7480fd2014-11-10 15:59:36 -08001914 }
Eric Dumazet110b2492010-10-04 04:27:36 +00001915 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 }
1917
David Ahern7a35a502018-12-05 20:02:29 -08001918 if (!dev) {
1919 NL_SET_ERR_MSG(extack, "Device not specified");
WANG Congd7480fd2014-11-10 15:59:36 -08001920 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001921 }
WANG Congd7480fd2014-11-10 15:59:36 -08001922
David Ahernb8fb1ab2019-04-16 17:31:43 -07001923 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1924 err = -EINVAL;
1925 goto out;
1926 }
1927
WANG Congd7480fd2014-11-10 15:59:36 -08001928 neigh = neigh_lookup(tbl, dst, dev);
1929 if (neigh == NULL) {
David Aherne997f8a2018-12-11 18:57:25 -07001930 bool exempt_from_gc;
1931
WANG Congd7480fd2014-11-10 15:59:36 -08001932 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1933 err = -ENOENT;
1934 goto out;
1935 }
1936
David Aherne997f8a2018-12-11 18:57:25 -07001937 exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1938 ndm->ndm_flags & NTF_EXT_LEARNED;
1939 neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
WANG Congd7480fd2014-11-10 15:59:36 -08001940 if (IS_ERR(neigh)) {
1941 err = PTR_ERR(neigh);
1942 goto out;
1943 }
1944 } else {
1945 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1946 err = -EEXIST;
1947 neigh_release(neigh);
1948 goto out;
1949 }
1950
1951 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07001952 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1953 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
WANG Congd7480fd2014-11-10 15:59:36 -08001954 }
1955
Roopa Prabhu9ce33e42018-04-24 13:49:34 -07001956 if (ndm->ndm_flags & NTF_EXT_LEARNED)
1957 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1958
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07001959 if (ndm->ndm_flags & NTF_ROUTER)
1960 flags |= NEIGH_UPDATE_F_ISROUTER;
1961
WANG Congd7480fd2014-11-10 15:59:36 -08001962 if (ndm->ndm_flags & NTF_USE) {
1963 neigh_event_send(neigh, NULL);
1964 err = 0;
1965 } else
David Ahern7a35a502018-12-05 20:02:29 -08001966 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1967 NETLINK_CB(skb).portid, extack);
David Aherndf9b0e32018-12-15 14:09:06 -08001968
1969 if (protocol)
1970 neigh->protocol = protocol;
1971
WANG Congd7480fd2014-11-10 15:59:36 -08001972 neigh_release(neigh);
1973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974out:
1975 return err;
1976}
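/*
 * Userspace reaches the handler above via RTM_NEWNEIGH; an illustrative
 * iproute2 equivalent (addresses assumed) is
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0 nud permanent
 *
 * NLM_F_CREATE | NLM_F_REPLACE keeps the OVERRIDE flags set at the top of
 * neigh_add(), so an existing cached lladdr is replaced as well.
 */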
1977
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001978static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1979{
Thomas Grafca860fb2006-08-07 18:00:18 -07001980 struct nlattr *nest;
1981
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001982 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
Thomas Grafca860fb2006-08-07 18:00:18 -07001983 if (nest == NULL)
1984 return -ENOBUFS;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07001985
David S. Miller9a6308d2012-04-01 20:06:28 -04001986 if ((parms->dev &&
1987 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
Reshetova, Elena63439442017-06-30 13:07:56 +03001988 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001989 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1990 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04001991 /* approximate value for the deprecated QUEUE_LEN (in packets) */
1992 nla_put_u32(skb, NDTPA_QUEUE_LEN,
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001993 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1994 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1995 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1996 nla_put_u32(skb, NDTPA_UCAST_PROBES,
1997 NEIGH_VAR(parms, UCAST_PROBES)) ||
1998 nla_put_u32(skb, NDTPA_MCAST_PROBES,
1999 NEIGH_VAR(parms, MCAST_PROBES)) ||
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002000 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2001 NEIGH_VAR(parms, MCAST_REPROBES)) ||
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002002 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2003 NDTPA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04002004 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002005 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002006 nla_put_msecs(skb, NDTPA_GC_STALETIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002007 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04002008 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002009 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002010 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002011 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002012 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002013 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002014 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002015 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002016 nla_put_msecs(skb, NDTPA_LOCKTIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002017 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
David S. Miller9a6308d2012-04-01 20:06:28 -04002018 goto nla_put_failure;
Thomas Grafca860fb2006-08-07 18:00:18 -07002019 return nla_nest_end(skb, nest);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002020
Thomas Grafca860fb2006-08-07 18:00:18 -07002021nla_put_failure:
Thomas Grafbc3ed282008-06-03 16:36:54 -07002022 nla_nest_cancel(skb, nest);
2023 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002024}
2025
Thomas Grafca860fb2006-08-07 18:00:18 -07002026static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2027 u32 pid, u32 seq, int type, int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002028{
2029 struct nlmsghdr *nlh;
2030 struct ndtmsg *ndtmsg;
2031
Thomas Grafca860fb2006-08-07 18:00:18 -07002032 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2033 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002034 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002035
Thomas Grafca860fb2006-08-07 18:00:18 -07002036 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002037
2038 read_lock_bh(&tbl->lock);
2039 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002040 ndtmsg->ndtm_pad1 = 0;
2041 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002042
David S. Miller9a6308d2012-04-01 20:06:28 -04002043 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002044 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04002045 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2046 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2047 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2048 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002049 {
2050 unsigned long now = jiffies;
2051 unsigned int flush_delta = now - tbl->last_flush;
2052 unsigned int rand_delta = now - tbl->last_rand;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002053 struct neigh_hash_table *nht;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002054 struct ndt_config ndc = {
2055 .ndtc_key_len = tbl->key_len,
2056 .ndtc_entry_size = tbl->entry_size,
2057 .ndtc_entries = atomic_read(&tbl->entries),
2058 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2059 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002060 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2061 };
2062
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002063 rcu_read_lock_bh();
2064 nht = rcu_dereference_bh(tbl->nht);
David S. Miller2c2aba62011-12-28 15:06:58 -05002065 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
David S. Millercd089332011-07-11 01:28:12 -07002066 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002067 rcu_read_unlock_bh();
2068
David S. Miller9a6308d2012-04-01 20:06:28 -04002069 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2070 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002071 }
2072
2073 {
2074 int cpu;
2075 struct ndt_stats ndst;
2076
2077 memset(&ndst, 0, sizeof(ndst));
2078
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07002079 for_each_possible_cpu(cpu) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002080 struct neigh_statistics *st;
2081
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002082 st = per_cpu_ptr(tbl->stats, cpu);
2083 ndst.ndts_allocs += st->allocs;
2084 ndst.ndts_destroys += st->destroys;
2085 ndst.ndts_hash_grows += st->hash_grows;
2086 ndst.ndts_res_failed += st->res_failed;
2087 ndst.ndts_lookups += st->lookups;
2088 ndst.ndts_hits += st->hits;
2089 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2090 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2091 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2092 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
Rick Jonesfb811392015-08-07 11:10:37 -07002093 ndst.ndts_table_fulls += st->table_fulls;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002094 }
2095
Nicolas Dichtelb6763382016-04-26 10:06:17 +02002096 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2097 NDTA_PAD))
David S. Miller9a6308d2012-04-01 20:06:28 -04002098 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002099 }
2100
2101 BUG_ON(tbl->parms.dev);
2102 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
Thomas Grafca860fb2006-08-07 18:00:18 -07002103 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002104
2105 read_unlock_bh(&tbl->lock);
Johannes Berg053c0952015-01-16 22:09:00 +01002106 nlmsg_end(skb, nlh);
2107 return 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002108
Thomas Grafca860fb2006-08-07 18:00:18 -07002109nla_put_failure:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002110 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08002111 nlmsg_cancel(skb, nlh);
2112 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002113}
2114
Thomas Grafca860fb2006-08-07 18:00:18 -07002115static int neightbl_fill_param_info(struct sk_buff *skb,
2116 struct neigh_table *tbl,
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002117 struct neigh_parms *parms,
Thomas Grafca860fb2006-08-07 18:00:18 -07002118 u32 pid, u32 seq, int type,
2119 unsigned int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002120{
2121 struct ndtmsg *ndtmsg;
2122 struct nlmsghdr *nlh;
2123
Thomas Grafca860fb2006-08-07 18:00:18 -07002124 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2125 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002126 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002127
Thomas Grafca860fb2006-08-07 18:00:18 -07002128 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002129
2130 read_lock_bh(&tbl->lock);
2131 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002132 ndtmsg->ndtm_pad1 = 0;
2133 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002134
Thomas Grafca860fb2006-08-07 18:00:18 -07002135 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2136 neightbl_fill_parms(skb, parms) < 0)
2137 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002138
2139 read_unlock_bh(&tbl->lock);
Johannes Berg053c0952015-01-16 22:09:00 +01002140 nlmsg_end(skb, nlh);
2141 return 0;
Thomas Grafca860fb2006-08-07 18:00:18 -07002142errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002143 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08002144 nlmsg_cancel(skb, nlh);
2145 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002146}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002147
Patrick McHardyef7c79e2007-06-05 12:38:30 -07002148static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07002149 [NDTA_NAME] = { .type = NLA_STRING },
2150 [NDTA_THRESH1] = { .type = NLA_U32 },
2151 [NDTA_THRESH2] = { .type = NLA_U32 },
2152 [NDTA_THRESH3] = { .type = NLA_U32 },
2153 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2154 [NDTA_PARMS] = { .type = NLA_NESTED },
2155};
2156
Patrick McHardyef7c79e2007-06-05 12:38:30 -07002157static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07002158 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2159 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2160 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2161 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2162 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2163 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002164 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
Thomas Graf6b3f8672006-08-07 17:58:53 -07002165 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2166 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2167 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2168 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2169 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2170 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2171 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2172};
2173
David Ahernc21ef3e2017-04-16 09:48:24 -07002174static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2175 struct netlink_ext_ack *extack)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002176{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002177 struct net *net = sock_net(skb->sk);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002178 struct neigh_table *tbl;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002179 struct ndtmsg *ndtmsg;
2180 struct nlattr *tb[NDTA_MAX+1];
WANG Congd7480fd2014-11-10 15:59:36 -08002181 bool found = false;
2182 int err, tidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002183
Thomas Graf6b3f8672006-08-07 17:58:53 -07002184 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
David Ahernc21ef3e2017-04-16 09:48:24 -07002185 nl_neightbl_policy, extack);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002186 if (err < 0)
2187 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002188
Thomas Graf6b3f8672006-08-07 17:58:53 -07002189 if (tb[NDTA_NAME] == NULL) {
2190 err = -EINVAL;
2191 goto errout;
2192 }
2193
2194 ndtmsg = nlmsg_data(nlh);
WANG Congd7480fd2014-11-10 15:59:36 -08002195
2196 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2197 tbl = neigh_tables[tidx];
2198 if (!tbl)
2199 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002200 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2201 continue;
WANG Congd7480fd2014-11-10 15:59:36 -08002202 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2203 found = true;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002204 break;
WANG Congd7480fd2014-11-10 15:59:36 -08002205 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002206 }
2207
WANG Congd7480fd2014-11-10 15:59:36 -08002208 if (!found)
2209 return -ENOENT;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002210
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002211 /*
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002212 * We acquire tbl->lock to be nice to the periodic timers and
2213 * make sure they always see a consistent set of values.
2214 */
2215 write_lock_bh(&tbl->lock);
2216
Thomas Graf6b3f8672006-08-07 17:58:53 -07002217 if (tb[NDTA_PARMS]) {
2218 struct nlattr *tbp[NDTPA_MAX+1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002219 struct neigh_parms *p;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002220 int i, ifindex = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002221
Thomas Graf6b3f8672006-08-07 17:58:53 -07002222 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
David Ahernc21ef3e2017-04-16 09:48:24 -07002223 nl_ntbl_parm_policy, extack);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002224 if (err < 0)
2225 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002226
Thomas Graf6b3f8672006-08-07 17:58:53 -07002227 if (tbp[NDTPA_IFINDEX])
2228 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002229
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07002230 p = lookup_neigh_parms(tbl, net, ifindex);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002231 if (p == NULL) {
2232 err = -ENOENT;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002233 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002234 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002235
Thomas Graf6b3f8672006-08-07 17:58:53 -07002236 for (i = 1; i <= NDTPA_MAX; i++) {
2237 if (tbp[i] == NULL)
2238 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002239
Thomas Graf6b3f8672006-08-07 17:58:53 -07002240 switch (i) {
2241 case NDTPA_QUEUE_LEN:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002242 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2243 nla_get_u32(tbp[i]) *
2244 SKB_TRUESIZE(ETH_FRAME_LEN));
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002245 break;
2246 case NDTPA_QUEUE_LENBYTES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002247 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2248 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002249 break;
2250 case NDTPA_PROXY_QLEN:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002251 NEIGH_VAR_SET(p, PROXY_QLEN,
2252 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002253 break;
2254 case NDTPA_APP_PROBES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002255 NEIGH_VAR_SET(p, APP_PROBES,
2256 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002257 break;
2258 case NDTPA_UCAST_PROBES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002259 NEIGH_VAR_SET(p, UCAST_PROBES,
2260 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002261 break;
2262 case NDTPA_MCAST_PROBES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002263 NEIGH_VAR_SET(p, MCAST_PROBES,
2264 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002265 break;
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002266 case NDTPA_MCAST_REPROBES:
2267 NEIGH_VAR_SET(p, MCAST_REPROBES,
2268 nla_get_u32(tbp[i]));
2269 break;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002270 case NDTPA_BASE_REACHABLE_TIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002271 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2272 nla_get_msecs(tbp[i]));
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01002273 /* update reachable_time as well; otherwise the change only
2274 * takes effect the next time neigh_periodic_work decides to
2275 * recompute it (which can be several minutes away)
2276 */
2277 p->reachable_time =
2278 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002279 break;
2280 case NDTPA_GC_STALETIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002281 NEIGH_VAR_SET(p, GC_STALETIME,
2282 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002283 break;
2284 case NDTPA_DELAY_PROBE_TIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002285 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2286 nla_get_msecs(tbp[i]));
Ido Schimmel2a4501a2016-07-05 11:27:42 +02002287 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002288 break;
2289 case NDTPA_RETRANS_TIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002290 NEIGH_VAR_SET(p, RETRANS_TIME,
2291 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002292 break;
2293 case NDTPA_ANYCAST_DELAY:
Jiri Pirko39774582014-01-14 15:46:07 +01002294 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2295 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002296 break;
2297 case NDTPA_PROXY_DELAY:
Jiri Pirko39774582014-01-14 15:46:07 +01002298 NEIGH_VAR_SET(p, PROXY_DELAY,
2299 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002300 break;
2301 case NDTPA_LOCKTIME:
Jiri Pirko39774582014-01-14 15:46:07 +01002302 NEIGH_VAR_SET(p, LOCKTIME,
2303 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002304 break;
2305 }
2306 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002307 }
2308
Gao fengdc25c672013-06-20 10:01:34 +08002309 err = -ENOENT;
2310 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2311 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2312 !net_eq(net, &init_net))
2313 goto errout_tbl_lock;
2314
Thomas Graf6b3f8672006-08-07 17:58:53 -07002315 if (tb[NDTA_THRESH1])
2316 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2317
2318 if (tb[NDTA_THRESH2])
2319 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2320
2321 if (tb[NDTA_THRESH3])
2322 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2323
2324 if (tb[NDTA_GC_INTERVAL])
2325 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2326
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002327 err = 0;
2328
Thomas Graf6b3f8672006-08-07 17:58:53 -07002329errout_tbl_lock:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002330 write_unlock_bh(&tbl->lock);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002331errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002332 return err;
2333}
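/*
 * neightbl_set() is the RTM_SETNEIGHTBL handler. A rough iproute2
 * equivalent (option names assumed) is
 *
 *	ip ntable change name arp_cache thresh1 256 thresh2 512 thresh3 1024
 *
 * Per-device parameters arrive nested in NDTA_PARMS and are matched with
 * lookup_neigh_parms() above.
 */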
2334
David Ahern9632d472018-10-07 20:16:37 -07002335static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2336 struct netlink_ext_ack *extack)
2337{
2338 struct ndtmsg *ndtm;
2339
2340 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2341 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2342 return -EINVAL;
2343 }
2344
2345 ndtm = nlmsg_data(nlh);
2346 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2347 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2348 return -EINVAL;
2349 }
2350
2351 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2352 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2353 return -EINVAL;
2354 }
2355
2356 return 0;
2357}
2358
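/* Dump cursor sketch for neightbl_dump_info() below: the netlink callback
 * args carry the resume point between recvmsg() calls,
 *
 *   cb->args[0]  index of the last fully dumped neigh_table      (tbl_skip)
 *   cb->args[1]  index of the next per-device neigh_parms entry  (neigh_skip)
 *
 * so a partially filled skb can be continued without re-walking tables that
 * were already emitted.
 */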
Thomas Grafc8822a42007-03-22 11:50:06 -07002359static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002360{
David Ahern9632d472018-10-07 20:16:37 -07002361 const struct nlmsghdr *nlh = cb->nlh;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002362 struct net *net = sock_net(skb->sk);
Thomas Grafca860fb2006-08-07 18:00:18 -07002363 int family, tidx, nidx = 0;
2364 int tbl_skip = cb->args[0];
2365 int neigh_skip = cb->args[1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002366 struct neigh_table *tbl;
2367
David Ahern9632d472018-10-07 20:16:37 -07002368 if (cb->strict_check) {
2369 int err = neightbl_valid_dump_info(nlh, cb->extack);
2370
2371 if (err < 0)
2372 return err;
2373 }
2374
2375 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002376
WANG Congd7480fd2014-11-10 15:59:36 -08002377 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002378 struct neigh_parms *p;
2379
WANG Congd7480fd2014-11-10 15:59:36 -08002380 tbl = neigh_tables[tidx];
2381 if (!tbl)
2382 continue;
2383
Thomas Grafca860fb2006-08-07 18:00:18 -07002384 if (tidx < tbl_skip || (family && tbl->family != family))
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002385 continue;
2386
Eric W. Biederman15e47302012-09-07 20:12:54 +00002387 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
David Ahern9632d472018-10-07 20:16:37 -07002388 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
David S. Miller7b46a642015-01-18 23:36:08 -05002389 NLM_F_MULTI) < 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002390 break;
2391
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01002392 nidx = 0;
2393 p = list_next_entry(&tbl->parms, list);
2394 list_for_each_entry_from(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002395 if (!net_eq(neigh_parms_net(p), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002396 continue;
2397
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002398 if (nidx < neigh_skip)
2399 goto next;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002400
Thomas Grafca860fb2006-08-07 18:00:18 -07002401 if (neightbl_fill_param_info(skb, tbl, p,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002402 NETLINK_CB(cb->skb).portid,
David Ahern9632d472018-10-07 20:16:37 -07002403 nlh->nlmsg_seq,
Thomas Grafca860fb2006-08-07 18:00:18 -07002404 RTM_NEWNEIGHTBL,
David S. Miller7b46a642015-01-18 23:36:08 -05002405 NLM_F_MULTI) < 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002406 goto out;
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002407 next:
2408 nidx++;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002409 }
2410
Thomas Grafca860fb2006-08-07 18:00:18 -07002411 neigh_skip = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002412 }
2413out:
Thomas Grafca860fb2006-08-07 18:00:18 -07002414 cb->args[0] = tidx;
2415 cb->args[1] = nidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002416
2417 return skb->len;
2418}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
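/* Message layout produced by neigh_fill_info() below for one RTM_NEWNEIGH:
 * an ndmsg header followed by NDA_DST, an NDA_LLADDR snapshot when the entry
 * is in a NUD_VALID state, NDA_CACHEINFO with jiffies converted to clock
 * ticks, NDA_PROBES, and NDA_PROTOCOL when the entry carries an originator
 * protocol.
 */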
Thomas Graf8b8aec52006-08-07 17:56:37 -07002420static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2421 u32 pid, u32 seq, int type, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422{
2423 unsigned long now = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 struct nda_cacheinfo ci;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002425 struct nlmsghdr *nlh;
2426 struct ndmsg *ndm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
Thomas Graf8b8aec52006-08-07 17:56:37 -07002428 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2429 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002430 return -EMSGSIZE;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002431
2432 ndm = nlmsg_data(nlh);
2433 ndm->ndm_family = neigh->ops->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002434 ndm->ndm_pad1 = 0;
2435 ndm->ndm_pad2 = 0;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002436 ndm->ndm_flags = neigh->flags;
2437 ndm->ndm_type = neigh->type;
2438 ndm->ndm_ifindex = neigh->dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439
David S. Miller9a6308d2012-04-01 20:06:28 -04002440 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2441 goto nla_put_failure;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002442
2443 read_lock_bh(&neigh->lock);
2444 ndm->ndm_state = neigh->nud_state;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00002445 if (neigh->nud_state & NUD_VALID) {
2446 char haddr[MAX_ADDR_LEN];
2447
2448 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2449 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2450 read_unlock_bh(&neigh->lock);
2451 goto nla_put_failure;
2452 }
Thomas Graf8b8aec52006-08-07 17:56:37 -07002453 }
2454
Stephen Hemmingerb9f5f522008-06-03 16:03:15 -07002455 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2456 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2457 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
Reshetova, Elena9f237432017-06-30 13:07:55 +03002458 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002459 read_unlock_bh(&neigh->lock);
2460
David S. Miller9a6308d2012-04-01 20:06:28 -04002461 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2462 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2463 goto nla_put_failure;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002464
David Aherndf9b0e32018-12-15 14:09:06 -08002465 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2466 goto nla_put_failure;
2467
Johannes Berg053c0952015-01-16 22:09:00 +01002468 nlmsg_end(skb, nlh);
2469 return 0;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002470
2471nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08002472 nlmsg_cancel(skb, nlh);
2473 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474}
2475
Tony Zelenoff84920c12012-01-26 22:28:58 +00002476static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2477 u32 pid, u32 seq, int type, unsigned int flags,
2478 struct neigh_table *tbl)
2479{
2480 struct nlmsghdr *nlh;
2481 struct ndmsg *ndm;
2482
2483 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2484 if (nlh == NULL)
2485 return -EMSGSIZE;
2486
2487 ndm = nlmsg_data(nlh);
2488 ndm->ndm_family = tbl->family;
2489 ndm->ndm_pad1 = 0;
2490 ndm->ndm_pad2 = 0;
2491 ndm->ndm_flags = pn->flags | NTF_PROXY;
Jun Zhao545469f2014-07-26 00:38:59 +08002492 ndm->ndm_type = RTN_UNICAST;
Konstantin Khlebnikov6adc5fd2015-12-01 01:14:48 +03002493 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002494 ndm->ndm_state = NUD_NONE;
2495
David S. Miller9a6308d2012-04-01 20:06:28 -04002496 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2497 goto nla_put_failure;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002498
David Aherndf9b0e32018-12-15 14:09:06 -08002499 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2500 goto nla_put_failure;
2501
Johannes Berg053c0952015-01-16 22:09:00 +01002502 nlmsg_end(skb, nlh);
2503 return 0;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002504
2505nla_put_failure:
2506 nlmsg_cancel(skb, nlh);
2507 return -EMSGSIZE;
2508}
2509
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07002510static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
Thomas Grafd961db32007-08-08 23:12:56 -07002511{
2512 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07002513 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
Thomas Grafd961db32007-08-08 23:12:56 -07002514}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
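/* Dump filtering helpers: each of the two functions below returns true when
 * the neighbour's device should be skipped because it does not match the
 * NDA_MASTER or NDA_IFINDEX constraint supplied with the dump request
 * (an index of 0 means "no constraint").
 */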
David Ahern21fdd092015-09-29 09:32:03 -07002516static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2517{
2518 struct net_device *master;
2519
2520 if (!master_idx)
2521 return false;
2522
Eric Dumazetaab456d2018-10-26 09:33:27 -07002523 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
David Ahern21fdd092015-09-29 09:32:03 -07002524 if (!master || master->ifindex != master_idx)
2525 return true;
2526
2527 return false;
2528}
2529
David Ahern16660f02015-10-03 11:43:46 -07002530static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2531{
Eric Dumazetaab456d2018-10-26 09:33:27 -07002532 if (filter_idx && (!dev || dev->ifindex != filter_idx))
David Ahern16660f02015-10-03 11:43:46 -07002533 return true;
2534
2535 return false;
2536}
2537
David Ahern6f52f802018-10-03 15:33:12 -07002538struct neigh_dump_filter {
2539 int master_idx;
2540 int dev_idx;
2541};
2542
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
David Ahern6f52f802018-10-03 15:33:12 -07002544 struct netlink_callback *cb,
2545 struct neigh_dump_filter *filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546{
Eric Dumazet767e97e2010-10-06 17:49:21 -07002547 struct net *net = sock_net(skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 struct neighbour *n;
2549 int rc, h, s_h = cb->args[1];
2550 int idx, s_idx = idx = cb->args[2];
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002551 struct neigh_hash_table *nht;
David Ahern21fdd092015-09-29 09:32:03 -07002552 unsigned int flags = NLM_F_MULTI;
David Ahern21fdd092015-09-29 09:32:03 -07002553
David Ahern6f52f802018-10-03 15:33:12 -07002554 if (filter->dev_idx || filter->master_idx)
2555 flags |= NLM_F_DUMP_FILTERED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002557 rcu_read_lock_bh();
2558 nht = rcu_dereference_bh(tbl->nht);
2559
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002560 for (h = s_h; h < (1 << nht->hash_shift); h++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 if (h > s_h)
2562 s_idx = 0;
Eric Dumazet767e97e2010-10-06 17:49:21 -07002563 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2564 n != NULL;
2565 n = rcu_dereference_bh(n->next)) {
Zhang Shengju18502ac2016-11-30 11:24:42 +08002566 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2567 goto next;
David Ahern6f52f802018-10-03 15:33:12 -07002568 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2569 neigh_master_filtered(n->dev, filter->master_idx))
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002570 goto next;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002571 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 cb->nlh->nlmsg_seq,
Jamal Hadi Salimb6544c02005-06-18 22:54:12 -07002573 RTM_NEWNEIGH,
David Ahern21fdd092015-09-29 09:32:03 -07002574 flags) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 rc = -1;
2576 goto out;
2577 }
Eric Dumazet767e97e2010-10-06 17:49:21 -07002578next:
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002579 idx++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 }
2582 rc = skb->len;
2583out:
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002584 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 cb->args[1] = h;
2586 cb->args[2] = idx;
2587 return rc;
2588}
2589
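/* Proxy entries are dumped with their own cursor pair (cb->args[3] and
 * cb->args[4] below), so a resumed dump of pneigh entries does not disturb
 * the neigh_dump_table() state kept in args[1]/args[2] above.
 */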
Tony Zelenoff84920c12012-01-26 22:28:58 +00002590static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
David Ahern6f52f802018-10-03 15:33:12 -07002591 struct netlink_callback *cb,
2592 struct neigh_dump_filter *filter)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002593{
2594 struct pneigh_entry *n;
2595 struct net *net = sock_net(skb->sk);
2596 int rc, h, s_h = cb->args[3];
2597 int idx, s_idx = idx = cb->args[4];
David Ahern6f52f802018-10-03 15:33:12 -07002598 unsigned int flags = NLM_F_MULTI;
2599
2600 if (filter->dev_idx || filter->master_idx)
2601 flags |= NLM_F_DUMP_FILTERED;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002602
2603 read_lock_bh(&tbl->lock);
2604
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002605 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
Tony Zelenoff84920c12012-01-26 22:28:58 +00002606 if (h > s_h)
2607 s_idx = 0;
2608 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
Zhang Shengju18502ac2016-11-30 11:24:42 +08002609 if (idx < s_idx || pneigh_net(n) != net)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002610 goto next;
David Ahern6f52f802018-10-03 15:33:12 -07002611 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2612 neigh_master_filtered(n->dev, filter->master_idx))
2613 goto next;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002614 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
Tony Zelenoff84920c12012-01-26 22:28:58 +00002615 cb->nlh->nlmsg_seq,
David Ahern6f52f802018-10-03 15:33:12 -07002616 RTM_NEWNEIGH, flags, tbl) < 0) {
Tony Zelenoff84920c12012-01-26 22:28:58 +00002617 read_unlock_bh(&tbl->lock);
2618 rc = -1;
2619 goto out;
2620 }
2621 next:
2622 idx++;
2623 }
2624 }
2625
2626 read_unlock_bh(&tbl->lock);
2627 rc = skb->len;
2628out:
2629 cb->args[3] = h;
2630 cb->args[4] = idx;
2631 return rc;
2632
2633}
2634
David Ahern51183d22018-10-07 20:16:36 -07002635static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2636 bool strict_check,
2637 struct neigh_dump_filter *filter,
2638 struct netlink_ext_ack *extack)
2639{
2640 struct nlattr *tb[NDA_MAX + 1];
2641 int err, i;
2642
2643 if (strict_check) {
2644 struct ndmsg *ndm;
2645
2646 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2647 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2648 return -EINVAL;
2649 }
2650
2651 ndm = nlmsg_data(nlh);
2652 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
David Ahernc0fde872018-12-19 16:54:38 -08002653 ndm->ndm_state || ndm->ndm_type) {
David Ahern51183d22018-10-07 20:16:36 -07002654 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2655 return -EINVAL;
2656 }
2657
David Ahernc0fde872018-12-19 16:54:38 -08002658 if (ndm->ndm_flags & ~NTF_PROXY) {
2659 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2660 return -EINVAL;
2661 }
2662
David Ahern51183d22018-10-07 20:16:36 -07002663 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
David Aherna9cd3432018-12-19 20:02:36 -08002664 nda_policy, extack);
David Ahern51183d22018-10-07 20:16:36 -07002665 } else {
2666 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
David Aherna9cd3432018-12-19 20:02:36 -08002667 nda_policy, extack);
David Ahern51183d22018-10-07 20:16:36 -07002668 }
2669 if (err < 0)
2670 return err;
2671
2672 for (i = 0; i <= NDA_MAX; ++i) {
2673 if (!tb[i])
2674 continue;
2675
2676 /* all new attributes should require strict_check */
2677 switch (i) {
2678 case NDA_IFINDEX:
David Ahern51183d22018-10-07 20:16:36 -07002679 filter->dev_idx = nla_get_u32(tb[i]);
2680 break;
2681 case NDA_MASTER:
David Ahern51183d22018-10-07 20:16:36 -07002682 filter->master_idx = nla_get_u32(tb[i]);
2683 break;
2684 default:
2685 if (strict_check) {
2686 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2687 return -EINVAL;
2688 }
2689 }
2690 }
2691
2692 return 0;
2693}
2694
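/* Request selection sketch: a dump whose ndmsg header sets ndm_flags to
 * exactly NTF_PROXY is routed to pneigh_dump_table(), everything else to
 * neigh_dump_table(). From userspace this is roughly what, for example,
 * "ip neigh show proxy" versus "ip neigh show" would trigger (commands given
 * for illustration only).
 */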
Thomas Grafc8822a42007-03-22 11:50:06 -07002695static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696{
David Ahern6f52f802018-10-03 15:33:12 -07002697 const struct nlmsghdr *nlh = cb->nlh;
2698 struct neigh_dump_filter filter = {};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 struct neigh_table *tbl;
2700 int t, family, s_t;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002701 int proxy = 0;
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002702 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
David Ahern6f52f802018-10-03 15:33:12 -07002704 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002705
 2706	/* check for full ndmsg structure presence; the family member is
 2707	 * at the same offset in both structures
2708 */
David Ahern6f52f802018-10-03 15:33:12 -07002709 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2710 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002711 proxy = 1;
2712
David Ahern51183d22018-10-07 20:16:36 -07002713 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2714 if (err < 0 && cb->strict_check)
2715 return err;
2716
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 s_t = cb->args[0];
2718
WANG Congd7480fd2014-11-10 15:59:36 -08002719 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2720 tbl = neigh_tables[t];
2721
2722 if (!tbl)
2723 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 if (t < s_t || (family && tbl->family != family))
2725 continue;
2726 if (t > s_t)
2727 memset(&cb->args[1], 0, sizeof(cb->args) -
2728 sizeof(cb->args[0]));
Tony Zelenoff84920c12012-01-26 22:28:58 +00002729 if (proxy)
David Ahern6f52f802018-10-03 15:33:12 -07002730 err = pneigh_dump_table(tbl, skb, cb, &filter);
Tony Zelenoff84920c12012-01-26 22:28:58 +00002731 else
David Ahern6f52f802018-10-03 15:33:12 -07002732 err = neigh_dump_table(tbl, skb, cb, &filter);
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002733 if (err < 0)
2734 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
2737 cb->args[0] = t;
2738 return skb->len;
2739}
2740
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002741static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2742 struct neigh_table **tbl,
2743 void **dst, int *dev_idx, u8 *ndm_flags,
2744 struct netlink_ext_ack *extack)
2745{
2746 struct nlattr *tb[NDA_MAX + 1];
2747 struct ndmsg *ndm;
2748 int err, i;
2749
2750 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2751 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2752 return -EINVAL;
2753 }
2754
2755 ndm = nlmsg_data(nlh);
2756 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2757 ndm->ndm_type) {
2758 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2759 return -EINVAL;
2760 }
2761
2762 if (ndm->ndm_flags & ~NTF_PROXY) {
2763 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2764 return -EINVAL;
2765 }
2766
2767 err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
2768 nda_policy, extack);
2769 if (err < 0)
2770 return err;
2771
2772 *ndm_flags = ndm->ndm_flags;
2773 *dev_idx = ndm->ndm_ifindex;
2774 *tbl = neigh_find_table(ndm->ndm_family);
2775 if (*tbl == NULL) {
2776 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2777 return -EAFNOSUPPORT;
2778 }
2779
2780 for (i = 0; i <= NDA_MAX; ++i) {
2781 if (!tb[i])
2782 continue;
2783
2784 switch (i) {
2785 case NDA_DST:
2786 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2787 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2788 return -EINVAL;
2789 }
2790 *dst = nla_data(tb[i]);
2791 break;
2792 default:
2793 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2794 return -EINVAL;
2795 }
2796 }
2797
2798 return 0;
2799}
2800
2801static inline size_t neigh_nlmsg_size(void)
2802{
2803 return NLMSG_ALIGN(sizeof(struct ndmsg))
2804 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2805 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2806 + nla_total_size(sizeof(struct nda_cacheinfo))
2807 + nla_total_size(4) /* NDA_PROBES */
2808 + nla_total_size(1); /* NDA_PROTOCOL */
2809}
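/* neigh_nlmsg_size() is a worst-case bound: both address attributes are
 * costed at MAX_ADDR_LEN even though the actual key and link-layer address
 * are usually shorter. It sizes the nlmsg_new() allocations in
 * neigh_get_reply() and in __neigh_notify() further below.
 */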
2810
2811static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2812 u32 pid, u32 seq)
2813{
2814 struct sk_buff *skb;
2815 int err = 0;
2816
2817 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2818 if (!skb)
2819 return -ENOBUFS;
2820
2821 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2822 if (err) {
2823 kfree_skb(skb);
2824 goto errout;
2825 }
2826
2827 err = rtnl_unicast(skb, net, pid);
2828errout:
2829 return err;
2830}
2831
2832static inline size_t pneigh_nlmsg_size(void)
2833{
2834 return NLMSG_ALIGN(sizeof(struct ndmsg))
Colin Ian King463561e2018-12-20 16:50:50 +00002835 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002836 + nla_total_size(1); /* NDA_PROTOCOL */
2837}
2838
2839static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2840 u32 pid, u32 seq, struct neigh_table *tbl)
2841{
2842 struct sk_buff *skb;
2843 int err = 0;
2844
2845 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2846 if (!skb)
2847 return -ENOBUFS;
2848
2849 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2850 if (err) {
2851 kfree_skb(skb);
2852 goto errout;
2853 }
2854
2855 err = rtnl_unicast(skb, net, pid);
2856errout:
2857 return err;
2858}
2859
2860static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2861 struct netlink_ext_ack *extack)
2862{
2863 struct net *net = sock_net(in_skb->sk);
2864 struct net_device *dev = NULL;
2865 struct neigh_table *tbl = NULL;
2866 struct neighbour *neigh;
2867 void *dst = NULL;
2868 u8 ndm_flags = 0;
2869 int dev_idx = 0;
2870 int err;
2871
2872 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2873 extack);
2874 if (err < 0)
2875 return err;
2876
2877 if (dev_idx) {
2878 dev = __dev_get_by_index(net, dev_idx);
2879 if (!dev) {
2880 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2881 return -ENODEV;
2882 }
2883 }
2884
2885 if (!dst) {
2886 NL_SET_ERR_MSG(extack, "Network address not specified");
2887 return -EINVAL;
2888 }
2889
2890 if (ndm_flags & NTF_PROXY) {
2891 struct pneigh_entry *pn;
2892
2893 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2894 if (!pn) {
2895 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2896 return -ENOENT;
2897 }
2898 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2899 nlh->nlmsg_seq, tbl);
2900 }
2901
2902 if (!dev) {
2903 NL_SET_ERR_MSG(extack, "No device specified");
2904 return -EINVAL;
2905 }
2906
2907 neigh = neigh_lookup(tbl, dst, dev);
2908 if (!neigh) {
2909 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2910 return -ENOENT;
2911 }
2912
2913 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2914 nlh->nlmsg_seq);
2915
2916 neigh_release(neigh);
2917
2918 return err;
2919}
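/* Usage sketch for the RTM_GETNEIGH single-entry path handled by neigh_get()
 * above, assuming an illustrative address and device name:
 *
 *   $ ip neigh get 192.0.2.1 dev eth0
 *
 * The handler resolves the table from ndm_family, requires NDA_DST, and
 * answers with a unicast RTM_NEWNEIGH built by neigh_get_reply() or, for
 * NTF_PROXY requests, pneigh_get_reply().
 */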
2920
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2922{
2923 int chain;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002924 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002926 rcu_read_lock_bh();
2927 nht = rcu_dereference_bh(tbl->nht);
2928
Eric Dumazet767e97e2010-10-06 17:49:21 -07002929 read_lock(&tbl->lock); /* avoid resizes */
David S. Millercd089332011-07-11 01:28:12 -07002930 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 struct neighbour *n;
2932
Eric Dumazet767e97e2010-10-06 17:49:21 -07002933 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2934 n != NULL;
2935 n = rcu_dereference_bh(n->next))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 cb(n, cookie);
2937 }
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002938 read_unlock(&tbl->lock);
2939 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940}
2941EXPORT_SYMBOL(neigh_for_each);
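/* Usage sketch for neigh_for_each(), assuming the IPv4 arp_tbl as the table
 * and a caller-provided counter as the cookie:
 *
 *   static void count_one(struct neighbour *n, void *cookie)
 *   {
 *           (*(unsigned int *)cookie)++;
 *   }
 *
 *   unsigned int nr = 0;
 *   neigh_for_each(&arp_tbl, count_one, &nr);
 *
 * The callback runs under the table read lock with rcu_read_lock_bh() held,
 * so it must not sleep.
 */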
2942
2943/* The tbl->lock must be held as a writer and BH disabled. */
2944void __neigh_for_each_release(struct neigh_table *tbl,
2945 int (*cb)(struct neighbour *))
2946{
2947 int chain;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002948 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002950 nht = rcu_dereference_protected(tbl->nht,
2951 lockdep_is_held(&tbl->lock));
David S. Millercd089332011-07-11 01:28:12 -07002952 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07002953 struct neighbour *n;
2954 struct neighbour __rcu **np;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002956 np = &nht->hash_buckets[chain];
Eric Dumazet767e97e2010-10-06 17:49:21 -07002957 while ((n = rcu_dereference_protected(*np,
2958 lockdep_is_held(&tbl->lock))) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 int release;
2960
2961 write_lock(&n->lock);
2962 release = cb(n);
2963 if (release) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07002964 rcu_assign_pointer(*np,
2965 rcu_dereference_protected(n->next,
2966 lockdep_is_held(&tbl->lock)));
David Ahern58956312018-12-07 12:24:57 -08002967 neigh_mark_dead(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 } else
2969 np = &n->next;
2970 write_unlock(&n->lock);
Thomas Graf4f494552007-08-08 23:12:36 -07002971 if (release)
2972 neigh_cleanup_and_release(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973 }
2974 }
2975}
2976EXPORT_SYMBOL(__neigh_for_each_release);
2977
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002978int neigh_xmit(int index, struct net_device *dev,
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002979 const void *addr, struct sk_buff *skb)
2980{
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002981 int err = -EAFNOSUPPORT;
2982 if (likely(index < NEIGH_NR_TABLES)) {
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002983 struct neigh_table *tbl;
2984 struct neighbour *neigh;
2985
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06002986 tbl = neigh_tables[index];
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002987 if (!tbl)
2988 goto out;
David Barrosob560f032016-06-28 11:16:43 +03002989 rcu_read_lock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002990 neigh = __neigh_lookup_noref(tbl, addr, dev);
2991 if (!neigh)
2992 neigh = __neigh_create(tbl, addr, dev, false);
2993 err = PTR_ERR(neigh);
David Barrosob560f032016-06-28 11:16:43 +03002994 if (IS_ERR(neigh)) {
2995 rcu_read_unlock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002996 goto out_kfree_skb;
David Barrosob560f032016-06-28 11:16:43 +03002997 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06002998 err = neigh->output(neigh, skb);
David Barrosob560f032016-06-28 11:16:43 +03002999 rcu_read_unlock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003000 }
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06003001 else if (index == NEIGH_LINK_TABLE) {
3002 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3003 addr, NULL, skb->len);
3004 if (err < 0)
3005 goto out_kfree_skb;
3006 err = dev_queue_xmit(skb);
3007 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003008out:
3009 return err;
3010out_kfree_skb:
3011 kfree_skb(skb);
3012 goto out;
3013}
3014EXPORT_SYMBOL(neigh_xmit);
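/* Usage sketch for neigh_xmit(), with the table index values assumed from
 * the standard neigh table enumeration:
 *
 *   err = neigh_xmit(NEIGH_ARP_TABLE, dev, &next_hop_ipv4, skb);
 *
 * looks up (or creates) the neighbour for the next hop and hands the skb to
 * its output function, while
 *
 *   err = neigh_xmit(NEIGH_LINK_TABLE, dev, dest_lladdr, skb);
 *
 * skips resolution entirely and only builds the hard header before
 * dev_queue_xmit(). Failures in neighbour creation or header construction
 * free the skb via the out_kfree_skb path.
 */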
3015
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016#ifdef CONFIG_PROC_FS
3017
3018static struct neighbour *neigh_get_first(struct seq_file *seq)
3019{
3020 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003021 struct net *net = seq_file_net(seq);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003022 struct neigh_hash_table *nht = state->nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 struct neighbour *n = NULL;
3024 int bucket = state->bucket;
3025
3026 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
David S. Millercd089332011-07-11 01:28:12 -07003027 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07003028 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029
3030 while (n) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003031 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003032 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 if (state->neigh_sub_iter) {
3034 loff_t fakep = 0;
3035 void *v;
3036
3037 v = state->neigh_sub_iter(state, n, &fakep);
3038 if (!v)
3039 goto next;
3040 }
3041 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3042 break;
3043 if (n->nud_state & ~NUD_NOARP)
3044 break;
Eric Dumazet767e97e2010-10-06 17:49:21 -07003045next:
3046 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 }
3048
3049 if (n)
3050 break;
3051 }
3052 state->bucket = bucket;
3053
3054 return n;
3055}
3056
3057static struct neighbour *neigh_get_next(struct seq_file *seq,
3058 struct neighbour *n,
3059 loff_t *pos)
3060{
3061 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003062 struct net *net = seq_file_net(seq);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003063 struct neigh_hash_table *nht = state->nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064
3065 if (state->neigh_sub_iter) {
3066 void *v = state->neigh_sub_iter(state, n, pos);
3067 if (v)
3068 return n;
3069 }
Eric Dumazet767e97e2010-10-06 17:49:21 -07003070 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071
3072 while (1) {
3073 while (n) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003074 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003075 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 if (state->neigh_sub_iter) {
3077 void *v = state->neigh_sub_iter(state, n, pos);
3078 if (v)
3079 return n;
3080 goto next;
3081 }
3082 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3083 break;
3084
3085 if (n->nud_state & ~NUD_NOARP)
3086 break;
Eric Dumazet767e97e2010-10-06 17:49:21 -07003087next:
3088 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089 }
3090
3091 if (n)
3092 break;
3093
David S. Millercd089332011-07-11 01:28:12 -07003094 if (++state->bucket >= (1 << nht->hash_shift))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 break;
3096
Eric Dumazet767e97e2010-10-06 17:49:21 -07003097 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 }
3099
3100 if (n && pos)
3101 --(*pos);
3102 return n;
3103}
3104
3105static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3106{
3107 struct neighbour *n = neigh_get_first(seq);
3108
3109 if (n) {
Chris Larson745e2032008-08-03 01:10:55 -07003110 --(*pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 while (*pos) {
3112 n = neigh_get_next(seq, n, pos);
3113 if (!n)
3114 break;
3115 }
3116 }
3117 return *pos ? NULL : n;
3118}
3119
3120static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3121{
3122 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003123 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 struct neigh_table *tbl = state->tbl;
3125 struct pneigh_entry *pn = NULL;
3126 int bucket = state->bucket;
3127
3128 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3129 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3130 pn = tbl->phash_buckets[bucket];
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003131 while (pn && !net_eq(pneigh_net(pn), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003132 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 if (pn)
3134 break;
3135 }
3136 state->bucket = bucket;
3137
3138 return pn;
3139}
3140
3141static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3142 struct pneigh_entry *pn,
3143 loff_t *pos)
3144{
3145 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003146 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147 struct neigh_table *tbl = state->tbl;
3148
Jorge Boncompte [DTI2]df07a942011-11-25 13:24:49 -05003149 do {
3150 pn = pn->next;
3151 } while (pn && !net_eq(pneigh_net(pn), net));
3152
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 while (!pn) {
3154 if (++state->bucket > PNEIGH_HASHMASK)
3155 break;
3156 pn = tbl->phash_buckets[state->bucket];
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003157 while (pn && !net_eq(pneigh_net(pn), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003158 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 if (pn)
3160 break;
3161 }
3162
3163 if (pn && pos)
3164 --(*pos);
3165
3166 return pn;
3167}
3168
3169static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3170{
3171 struct pneigh_entry *pn = pneigh_get_first(seq);
3172
3173 if (pn) {
Chris Larson745e2032008-08-03 01:10:55 -07003174 --(*pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 while (*pos) {
3176 pn = pneigh_get_next(seq, pn, pos);
3177 if (!pn)
3178 break;
3179 }
3180 }
3181 return *pos ? NULL : pn;
3182}
3183
3184static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3185{
3186 struct neigh_seq_state *state = seq->private;
3187 void *rc;
Chris Larson745e2032008-08-03 01:10:55 -07003188 loff_t idxpos = *pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189
Chris Larson745e2032008-08-03 01:10:55 -07003190 rc = neigh_get_idx(seq, &idxpos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
Chris Larson745e2032008-08-03 01:10:55 -07003192 rc = pneigh_get_idx(seq, &idxpos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193
3194 return rc;
3195}
3196
3197void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003198 __acquires(rcu_bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199{
3200 struct neigh_seq_state *state = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201
3202 state->tbl = tbl;
3203 state->bucket = 0;
3204 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3205
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003206 rcu_read_lock_bh();
3207 state->nht = rcu_dereference_bh(tbl->nht);
Eric Dumazet767e97e2010-10-06 17:49:21 -07003208
Chris Larson745e2032008-08-03 01:10:55 -07003209 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210}
3211EXPORT_SYMBOL(neigh_seq_start);
3212
3213void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3214{
3215 struct neigh_seq_state *state;
3216 void *rc;
3217
3218 if (v == SEQ_START_TOKEN) {
Chris Larsonbff69732008-08-03 01:02:41 -07003219 rc = neigh_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 goto out;
3221 }
3222
3223 state = seq->private;
3224 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3225 rc = neigh_get_next(seq, v, NULL);
3226 if (rc)
3227 goto out;
3228 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3229 rc = pneigh_get_first(seq);
3230 } else {
3231 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3232 rc = pneigh_get_next(seq, v, NULL);
3233 }
3234out:
3235 ++(*pos);
3236 return rc;
3237}
3238EXPORT_SYMBOL(neigh_seq_next);
3239
3240void neigh_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003241 __releases(rcu_bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242{
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003243 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244}
3245EXPORT_SYMBOL(neigh_seq_stop);
3246
3247/* statistics via seq_file */
3248
3249static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3250{
Christoph Hellwig71a50532018-04-15 10:16:41 +02003251 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003252 int cpu;
3253
3254 if (*pos == 0)
3255 return SEQ_START_TOKEN;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003256
Rusty Russell0f23174a2008-12-29 12:23:42 +00003257 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258 if (!cpu_possible(cpu))
3259 continue;
3260 *pos = cpu+1;
3261 return per_cpu_ptr(tbl->stats, cpu);
3262 }
3263 return NULL;
3264}
3265
3266static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3267{
Christoph Hellwig71a50532018-04-15 10:16:41 +02003268 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269 int cpu;
3270
Rusty Russell0f23174a2008-12-29 12:23:42 +00003271 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 if (!cpu_possible(cpu))
3273 continue;
3274 *pos = cpu+1;
3275 return per_cpu_ptr(tbl->stats, cpu);
3276 }
3277 return NULL;
3278}
3279
3280static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3281{
3282
3283}
3284
3285static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3286{
Christoph Hellwig71a50532018-04-15 10:16:41 +02003287 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 struct neigh_statistics *st = v;
3289
3290 if (v == SEQ_START_TOKEN) {
Rick Jonesfb811392015-08-07 11:10:37 -07003291 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292 return 0;
3293 }
3294
3295 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
Rick Jonesfb811392015-08-07 11:10:37 -07003296 "%08lx %08lx %08lx %08lx %08lx %08lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 atomic_read(&tbl->entries),
3298
3299 st->allocs,
3300 st->destroys,
3301 st->hash_grows,
3302
3303 st->lookups,
3304 st->hits,
3305
3306 st->res_failed,
3307
3308 st->rcv_probes_mcast,
3309 st->rcv_probes_ucast,
3310
3311 st->periodic_gc_runs,
Neil Horman9a6d2762008-07-16 20:50:49 -07003312 st->forced_gc_runs,
Rick Jonesfb811392015-08-07 11:10:37 -07003313 st->unres_discards,
3314 st->table_fulls
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 );
3316
3317 return 0;
3318}
3319
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003320static const struct seq_operations neigh_stat_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 .start = neigh_stat_seq_start,
3322 .next = neigh_stat_seq_next,
3323 .stop = neigh_stat_seq_stop,
3324 .show = neigh_stat_seq_show,
3325};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326#endif /* CONFIG_PROC_FS */
3327
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003328static void __neigh_notify(struct neighbour *n, int type, int flags,
3329 u32 pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003331 struct net *net = dev_net(n->dev);
Thomas Graf8b8aec52006-08-07 17:56:37 -07003332 struct sk_buff *skb;
Thomas Grafb8673312006-08-15 00:33:14 -07003333 int err = -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
Thomas Graf339bf982006-11-10 14:10:15 -08003335 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
Thomas Graf8b8aec52006-08-07 17:56:37 -07003336 if (skb == NULL)
Thomas Grafb8673312006-08-15 00:33:14 -07003337 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003339 err = neigh_fill_info(skb, n, pid, 0, type, flags);
Patrick McHardy26932562007-01-31 23:16:40 -08003340 if (err < 0) {
3341 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3342 WARN_ON(err == -EMSGSIZE);
3343 kfree_skb(skb);
3344 goto errout;
3345 }
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08003346 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3347 return;
Thomas Grafb8673312006-08-15 00:33:14 -07003348errout:
3349 if (err < 0)
Eric W. Biederman426b5302008-01-24 00:13:18 -08003350 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
Thomas Grafb8673312006-08-15 00:33:14 -07003351}
3352
3353void neigh_app_ns(struct neighbour *n)
3354{
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003355 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003357EXPORT_SYMBOL(neigh_app_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
3359#ifdef CONFIG_SYSCTL
Cong Wangb93196d2012-12-06 10:04:04 +08003360static int zero;
Francesco Fusco555445c2013-07-24 10:39:06 +02003361static int int_max = INT_MAX;
Cong Wangb93196d2012-12-06 10:04:04 +08003362static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363
Joe Perchesfe2c6332013-06-11 23:04:25 -07003364static int proc_unres_qlen(struct ctl_table *ctl, int write,
3365 void __user *buffer, size_t *lenp, loff_t *ppos)
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003366{
3367 int size, ret;
Joe Perchesfe2c6332013-06-11 23:04:25 -07003368 struct ctl_table tmp = *ctl;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003369
Shan Weice46cc62012-12-04 18:49:15 +00003370 tmp.extra1 = &zero;
3371 tmp.extra2 = &unres_qlen_max;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003372 tmp.data = &size;
Shan Weice46cc62012-12-04 18:49:15 +00003373
3374 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3375 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3376
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003377 if (write && !ret)
3378 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3379 return ret;
3380}
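/* Sketch of the conversion done in proc_unres_qlen() above: the parameter is
 * stored in bytes (QUEUE_LEN_BYTES) but exposed here as a packet count,
 * using SKB_TRUESIZE(ETH_FRAME_LEN) as the assumed per-packet cost:
 *
 *   unres_qlen       = unres_qlen_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)
 *   unres_qlen_bytes = unres_qlen       * SKB_TRUESIZE(ETH_FRAME_LEN)
 *
 * Writes through this handler therefore round to a whole number of
 * worst-case Ethernet frames.
 */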
3381
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003382static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3383 int family)
3384{
Jiri Pirkobba24892013-12-07 19:26:57 +01003385 switch (family) {
3386 case AF_INET:
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003387 return __in_dev_arp_parms_get_rcu(dev);
Jiri Pirkobba24892013-12-07 19:26:57 +01003388 case AF_INET6:
3389 return __in6_dev_nd_parms_get_rcu(dev);
3390 }
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003391 return NULL;
3392}
3393
3394static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3395 int index)
3396{
3397 struct net_device *dev;
3398 int family = neigh_parms_family(p);
3399
3400 rcu_read_lock();
3401 for_each_netdev_rcu(net, dev) {
3402 struct neigh_parms *dst_p =
3403 neigh_get_dev_parms_rcu(dev, family);
3404
3405 if (dst_p && !test_bit(index, dst_p->data_state))
3406 dst_p->data[index] = p->data[index];
3407 }
3408 rcu_read_unlock();
3409}
3410
3411static void neigh_proc_update(struct ctl_table *ctl, int write)
3412{
3413 struct net_device *dev = ctl->extra1;
3414 struct neigh_parms *p = ctl->extra2;
Jiri Pirko77d47af2013-12-10 23:55:07 +01003415 struct net *net = neigh_parms_net(p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003416 int index = (int *) ctl->data - p->data;
3417
3418 if (!write)
3419 return;
3420
3421 set_bit(index, p->data_state);
Marcus Huewe7627ae62017-02-15 01:00:36 +01003422 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3423 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003424 if (!dev) /* NULL dev means this is default value */
3425 neigh_copy_dflt_parms(net, p, index);
3426}
3427
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003428static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3429 void __user *buffer,
3430 size_t *lenp, loff_t *ppos)
3431{
3432 struct ctl_table tmp = *ctl;
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003433 int ret;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003434
3435 tmp.extra1 = &zero;
3436 tmp.extra2 = &int_max;
3437
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003438 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3439 neigh_proc_update(ctl, write);
3440 return ret;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003441}
3442
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003443int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3444 void __user *buffer, size_t *lenp, loff_t *ppos)
3445{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003446 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3447
3448 neigh_proc_update(ctl, write);
3449 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003450}
3451EXPORT_SYMBOL(neigh_proc_dointvec);
3452
3453int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3454 void __user *buffer,
3455 size_t *lenp, loff_t *ppos)
3456{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003457 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3458
3459 neigh_proc_update(ctl, write);
3460 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003461}
3462EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3463
3464static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3465 void __user *buffer,
3466 size_t *lenp, loff_t *ppos)
3467{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003468 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3469
3470 neigh_proc_update(ctl, write);
3471 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003472}
3473
3474int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3475 void __user *buffer,
3476 size_t *lenp, loff_t *ppos)
3477{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003478 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3479
3480 neigh_proc_update(ctl, write);
3481 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003482}
3483EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3484
3485static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3486 void __user *buffer,
3487 size_t *lenp, loff_t *ppos)
3488{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003489 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3490
3491 neigh_proc_update(ctl, write);
3492 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003493}
3494
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003495static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3496 void __user *buffer,
3497 size_t *lenp, loff_t *ppos)
3498{
3499 struct neigh_parms *p = ctl->extra2;
3500 int ret;
3501
3502 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3503 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3504 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3505 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3506 else
3507 ret = -1;
3508
3509 if (write && ret == 0) {
3510 /* update reachable_time as well, otherwise, the change will
3511 * only be effective after the next time neigh_periodic_work
3512 * decides to recompute it
3513 */
3514 p->reachable_time =
3515 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3516 }
3517 return ret;
3518}
3519
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003520#define NEIGH_PARMS_DATA_OFFSET(index) \
3521 (&((struct neigh_parms *) 0)->data[index])
3522
3523#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3524 [NEIGH_VAR_ ## attr] = { \
3525 .procname = name, \
3526 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3527 .maxlen = sizeof(int), \
3528 .mode = mval, \
3529 .proc_handler = proc, \
3530 }
3531
3532#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3533 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3534
3535#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003536 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003537
3538#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003539 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003540
3541#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003542 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003543
3544#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003545 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003546
3547#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003548 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
Eric W. Biederman54716e32010-02-14 03:27:03 +00003549
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550static struct neigh_sysctl_table {
3551 struct ctl_table_header *sysctl_header;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003552 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
Brian Haleyab32ea52006-09-22 14:15:41 -07003553} neigh_sysctl_template __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554 .neigh_vars = {
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003555 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3556 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3557 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09003558 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003559 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3560 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3561 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3562 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3563 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3564 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3565 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3566 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3567 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3568 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3569 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3570 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003571 [NEIGH_VAR_GC_INTERVAL] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572 .procname = "gc_interval",
3573 .maxlen = sizeof(int),
3574 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08003575 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 },
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003577 [NEIGH_VAR_GC_THRESH1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 .procname = "gc_thresh1",
3579 .maxlen = sizeof(int),
3580 .mode = 0644,
Francesco Fusco555445c2013-07-24 10:39:06 +02003581 .extra1 = &zero,
3582 .extra2 = &int_max,
3583 .proc_handler = proc_dointvec_minmax,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 },
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003585 [NEIGH_VAR_GC_THRESH2] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586 .procname = "gc_thresh2",
3587 .maxlen = sizeof(int),
3588 .mode = 0644,
Francesco Fusco555445c2013-07-24 10:39:06 +02003589 .extra1 = &zero,
3590 .extra2 = &int_max,
3591 .proc_handler = proc_dointvec_minmax,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592 },
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003593 [NEIGH_VAR_GC_THRESH3] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 .procname = "gc_thresh3",
3595 .maxlen = sizeof(int),
3596 .mode = 0644,
Francesco Fusco555445c2013-07-24 10:39:06 +02003597 .extra1 = &zero,
3598 .extra2 = &int_max,
3599 .proc_handler = proc_dointvec_minmax,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 },
Pavel Emelyanovc3bac5a2007-12-02 00:08:16 +11003601 {},
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 },
3603};
3604
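/* Path sketch for the sysctl table above: neigh_sysctl_register() below
 * builds "net/<family>/neigh/<dev|default>/<entry>", so an IPv4 device named
 * eth0 (name used for illustration) would expose, for example,
 * /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms backed by
 * NEIGH_VAR_BASE_REACHABLE_TIME_MS.
 */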
3605int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
Jiri Pirko73af6142013-12-07 19:26:55 +01003606 proc_handler *handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607{
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003608 int i;
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003609 struct neigh_sysctl_table *t;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003610 const char *dev_name_source;
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003611 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
Jiri Pirko73af6142013-12-07 19:26:55 +01003612 char *p_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003614 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 if (!t)
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003616 goto err;
3617
Jiri Pirkob194c1f2014-02-21 14:52:57 +01003618 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003619 t->neigh_vars[i].data += (long) p;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003620 t->neigh_vars[i].extra1 = dev;
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003621 t->neigh_vars[i].extra2 = p;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003622 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623
3624 if (dev) {
3625 dev_name_source = dev->name;
Eric W. Biedermand12af672007-10-18 03:05:25 -07003626 /* Terminate the table early */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003627 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3628 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629 } else {
Mathias Krause9ecf07a2014-07-12 22:36:44 +02003630 struct neigh_table *tbl = p->tbl;
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003631 dev_name_source = "default";
Mathias Krause9ecf07a2014-07-12 22:36:44 +02003632 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3633 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3634 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3635 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 }
3637
Eric W. Biedermanf8572d82009-11-05 13:32:03 -08003638 if (handler) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 /* RetransTime */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003640 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 /* ReachableTime */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003642 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 /* RetransTime (in milliseconds)*/
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003644 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 /* ReachableTime (in milliseconds) */
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003646 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003647 } else {
3648 /* Those handlers will update p->reachable_time after
3649 * base_reachable_time(_ms) is set to ensure the new timer starts being
3650 * applied after the next neighbour update instead of waiting for
3651 * neigh_periodic_work to update its value (can be multiple minutes)
3652 * So any handler that replaces them should do this as well
3653 */
3654 /* ReachableTime */
3655 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3656 neigh_proc_base_reachable_time;
3657 /* ReachableTime (in milliseconds) */
3658 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3659 neigh_proc_base_reachable_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 }
3661
Eric W. Biederman464dc802012-11-16 03:02:59 +00003662 /* Don't export sysctls to unprivileged users */
3663 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3664 t->neigh_vars[0].procname = NULL;
3665
Jiri Pirko73af6142013-12-07 19:26:55 +01003666 switch (neigh_parms_family(p)) {
3667 case AF_INET:
3668 p_name = "ipv4";
3669 break;
3670 case AF_INET6:
3671 p_name = "ipv6";
3672 break;
3673 default:
3674 BUG();
3675 }
3676
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003677 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3678 p_name, dev_name_source);
Denis V. Lunev4ab438f2008-02-28 20:48:01 -08003679 t->sysctl_header =
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003680 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003681 if (!t->sysctl_header)
Eric W. Biederman8f40a1f2012-04-19 13:38:03 +00003682 goto free;
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003683
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 p->sysctl_table = t;
3685 return 0;
3686
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003687free:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 kfree(t);
Pavel Emelyanov3c607bb2007-12-02 00:06:34 +11003689err:
3690 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003692EXPORT_SYMBOL(neigh_sysctl_register);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693
3694void neigh_sysctl_unregister(struct neigh_parms *p)
3695{
3696 if (p->sysctl_table) {
3697 struct neigh_sysctl_table *t = p->sysctl_table;
3698 p->sysctl_table = NULL;
Eric W. Biederman5dd3df12012-04-19 13:24:33 +00003699 unregister_net_sysctl_table(t->sysctl_header);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 kfree(t);
3701 }
3702}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003703EXPORT_SYMBOL(neigh_sysctl_unregister);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
3705#endif /* CONFIG_SYSCTL */
3706
Thomas Grafc8822a42007-03-22 11:50:06 -07003707static int __init neigh_init(void)
3708{
Florian Westphalb97bac62017-08-09 20:41:48 +02003709 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3710 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08003711 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
Thomas Grafc8822a42007-03-22 11:50:06 -07003712
Greg Rosec7ac8672011-06-10 01:27:09 +00003713 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
Florian Westphalb97bac62017-08-09 20:41:48 +02003714 0);
3715 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
Thomas Grafc8822a42007-03-22 11:50:06 -07003716
3717 return 0;
3718}
3719
3720subsys_initcall(neigh_init);