blob: ec0bf737b076ac8f529248ba68d9138766115e47 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Generic address resolution entity
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Fixes:
10 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
11 * Harald Welte Add neighbour cache statistics like rtstat
12 */
13
Joe Perchese005d192012-05-16 19:58:40 +000014#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
Konstantin Khlebnikov85704cb2019-01-08 12:30:00 +030017#include <linux/kmemleak.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/socket.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/netdevice.h>
23#include <linux/proc_fs.h>
24#ifdef CONFIG_SYSCTL
25#include <linux/sysctl.h>
26#endif
27#include <linux/times.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020028#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <net/neighbour.h>
David Ahern4b2a2bf2019-05-01 18:18:42 -070030#include <net/arp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <net/dst.h>
32#include <net/sock.h>
Tom Tucker8d717402006-07-30 20:43:36 -070033#include <net/netevent.h>
Thomas Grafa14a49d2006-08-07 17:53:08 -070034#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/rtnetlink.h>
36#include <linux/random.h>
Paulo Marques543537b2005-06-23 00:09:02 -070037#include <linux/string.h>
vignesh babuc3609d52007-08-24 22:27:55 -070038#include <linux/log2.h>
Jiri Pirko1d4c8c22013-12-07 19:26:56 +010039#include <linux/inetdevice.h>
Jiri Pirkobba24892013-12-07 19:26:57 +010040#include <net/addrconf.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Roopa Prabhu56dd18a2019-02-14 09:15:11 -080042#include <trace/events/neigh.h>
43
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#define NEIGH_DEBUG 1
Joe Perchesd5d427c2013-04-15 15:17:19 +000045#define neigh_dbg(level, fmt, ...) \
46do { \
47 if (level <= NEIGH_DEBUG) \
48 pr_debug(fmt, ##__VA_ARGS__); \
49} while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070050
51#define PNEIGH_HASHMASK 0xF
52
Kees Cooke99e88a2017-10-16 14:43:17 -070053static void neigh_timer_handler(struct timer_list *t);
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -070054static void __neigh_notify(struct neighbour *n, int type, int flags,
55 u32 pid);
56static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
Wolfgang Bumiller53b76cd2018-04-12 10:46:55 +020057static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Amos Waterland45fc3b12005-09-24 16:53:16 -070060#ifdef CONFIG_PROC_FS
Christoph Hellwig71a50532018-04-15 10:16:41 +020061static const struct seq_operations neigh_stat_seq_ops;
Amos Waterland45fc3b12005-09-24 16:53:16 -070062#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64/*
65 Neighbour hash table buckets are protected with rwlock tbl->lock.
66
67 - All the scans/updates to hash buckets MUST be made under this lock.
68 - NOTHING clever should be made under this lock: no callbacks
69 to protocol backends, no attempts to send something to network.
70 It will result in deadlocks, if backend/driver wants to use neighbour
71 cache.
72 - If the entry requires some non-trivial actions, increase
73 its reference count and release table lock.
74
75 Neighbour entries are protected:
76 - with reference count.
77 - with rwlock neigh->lock
78
79 Reference count prevents destruction.
80
81 neigh->lock mainly serializes ll address data and its validity state.
82 However, the same lock is used to protect another entry fields:
83 - timer
84 - resolution queue
85
86 Again, nothing clever shall be made under neigh->lock,
87 the most complicated procedure, which we allow is dev->hard_header.
88 It is supposed, that dev->hard_header is simplistic and does
89 not make callbacks to neighbour tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -070090 */
91
/* Output stub installed on dead/stray entries: drop every packet and
 * report the link as down so callers stop queueing to this neighbour.
 */
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
97
/* Final teardown path for an unlinked neighbour: emit the tracepoint,
 * tell userspace (RTM_DELNEIGH) and in-kernel netevent listeners, then
 * drop the table's reference.  The entry is freed once the refcount hits
 * zero inside neigh_release().
 */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
105
/*
 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to default IPv6 settings and is not overridable,
 * because it is really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;

	/* half of base plus a random offset in [0, base) */
	return (base >> 1) + (prandom_u32() % base);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
/* Mark @n dead and detach it from the table's gc and managed lists.
 * Must be called with n->lock held (callers also hold tbl->lock);
 * gc_entries only counts entries that sit on the gc list, hence the
 * paired atomic_dec on removal.
 */
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}
128
/* Re-evaluate whether @n belongs on the table's gc list after its state
 * or flags changed.  Lock order: tbl->lock (BH-safe) before n->lock.
 * Dead entries are left alone — neigh_mark_dead() already unlinked them.
 */
static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157
/* Keep tbl->managed_list in sync with the NTF_MANAGED flag on @n.
 * Same lock order as neigh_update_gc_list(): tbl->lock then n->lock;
 * dead entries are skipped.
 */
static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
178
/* Apply admin-requested flag changes (NTF_EXT_LEARNED / NTF_MANAGED) to
 * @neigh.  Only honoured for NEIGH_UPDATE_F_ADMIN updates.  Sets *notify
 * when a flag actually toggled, and tells the caller which follow-up
 * bookkeeping is needed via *gc_update / *managed_update.
 */
static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	/* Translate the update-flag bits into their NTF_* counterparts. */
	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}
}
207
/* Try to unlink @n from its hash bucket (@np points at the slot holding
 * it).  Succeeds only when the table holds the last reference
 * (refcnt == 1).  Caller must hold tbl->lock.  Returns true and releases
 * the entry on success.
 */
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}
228
/* Locate @ndel in its hash bucket and attempt to delete it via
 * neigh_del().  Caller must hold tbl->lock.  Returns true if the entry
 * was found and successfully removed.
 */
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	/* Top hash_shift bits of the hash select the bucket. */
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
251
/* Synchronous garbage collection, run when the table grows past its
 * thresholds (see neigh_alloc()).  Walks the gc list front-to-back —
 * oldest entries first, since new ones are added at the tail — and
 * removes unreferenced entries that are failed, NOARP, multicast-keyed,
 * or idle for more than 5 seconds.  Stops once enough entries were
 * reclaimed to get back under gc_thresh2.  Returns the number removed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		/* Only the table's own reference left? */
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
289
Pavel Emelyanova43d8992007-12-20 15:49:05 -0800290static void neigh_add_timer(struct neighbour *n, unsigned long when)
291{
292 neigh_hold(n);
293 if (unlikely(mod_timer(&n->timer, when))) {
294 printk("NEIGH: BUG, double timer add, state is %x\n",
295 n->nud_state);
296 dump_stack();
297 }
298}
299
/* Cancel a pending state-machine timer on @n.  Returns 1 (and drops the
 * reference the timer held) if a timer was actually deactivated, else 0.
 */
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
309
/* Drain the proxy-ARP delay queue, dropping each skb and the device
 * reference that was taken when it was queued.
 */
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
319
/* Unlink from the hash table every entry bound to @dev (all entries when
 * @dev is NULL), optionally sparing NUD_PERMANENT ones (@skip_perm).
 * Caller must hold tbl->lock.  Entries still referenced elsewhere are
 * neutralized in place: queue purged, output blackholed, state demoted —
 * their memory is freed only when the last user drops its reference.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			/* Unlink from the bucket; np deliberately not
			 * advanced, the next node slides into *np.
			 */
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
/* Flush all neighbour entries for @dev (including permanent ones) after
 * its hardware address changed; takes tbl->lock itself.
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
Herbert Xu49636bb2005-10-23 17:18:00 +1000381
/* Common teardown for device down / carrier loss: flush hash entries
 * (optionally keeping NUD_PERMANENT ones), drop matching proxy entries
 * — pneigh_ifdown_and_unlock() releases tbl->lock — then stop the proxy
 * timer and drain its queue.
 */
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
David Ahern859bd2e2018-10-11 20:33:49 -0700393
/* Carrier loss: flush @dev's entries but keep NUD_PERMANENT ones. */
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);
400
/* Device going down: flush every entry for @dev, permanent included. */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
/* Allocate and minimally initialize a neighbour entry for @tbl/@dev.
 * Unless @exempt_from_gc, the new entry is counted against gc_entries
 * and allocation may trigger a synchronous neigh_forced_gc() when the
 * table is past gc_thresh2/gc_thresh3.  Returns NULL on failure (table
 * full or out of memory).  The entry starts dead (n->dead = 1); the
 * caller links it into the table and clears the flag.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		/* GC must free something, or we refuse at hard limit. */
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	/* entry_size includes per-protocol key storage; neigh_priv_len is
	 * extra room requested by the device.
	 */
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
464
/* Pick a random hash seed; the low bit is forced to 1 so the seed is
 * never zero.
 */
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}
469
/* Allocate a hash table with 2^shift buckets plus fresh hash seeds.
 * Bucket arrays larger than a page come from the page allocator, which
 * kmemleak does not track by default — hence the explicit
 * kmemleak_alloc().  Returns NULL on allocation failure.
 */
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
498
/* RCU callback freeing an old hash table after a resize; mirrors the
 * two allocation paths of neigh_hash_alloc() (kzalloc vs free pages,
 * with the matching kmemleak bookkeeping).
 */
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
515
/* Grow the hash table to 2^new_shift buckets and rehash every entry.
 * Caller must hold tbl->lock.  The old table is published out and freed
 * via RCU, so lockless readers see either the old or the new table
 * consistently.  On allocation failure the old table is kept and
 * returned.
 */
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			/* Rehash with the new table's seeds. */
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			/* Push onto the head of the new bucket. */
			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
556
/* Look up the neighbour for (@pkey, @dev) in @tbl under RCU.  Returns
 * the entry with an extra reference taken (caller must neigh_release()),
 * or NULL if absent or already being freed (refcount hit zero).
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		/* Entry may be concurrently dying; only take a ref if
		 * the refcount is still nonzero.
		 */
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576
/* Device-independent lookup: find an entry matching @pkey in namespace
 * @net regardless of which device it is bound to (hashes with a NULL
 * dev).  Runs under RCU; returns a referenced entry or NULL.
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607
/* Create and insert a neighbour entry for (@pkey, @dev) in @tbl.
 * Runs protocol (tbl->constructor), driver (ndo_neigh_construct) and
 * parms (neigh_setup) hooks before inserting under tbl->lock.  If a
 * concurrent creator won the race, the existing entry is returned
 * instead and the new one released.  @want_ref controls whether the
 * returned entry carries an extra reference.  Returns ERR_PTR() on
 * failure.
 */
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/* Grow the hash table once load factor exceeds one. */
	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	/* parms went away while we were setting up: abort. */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Re-check for a duplicate inserted while we were unlocked. */
	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	/* Publish at the bucket head; readers see a fully-initialized n. */
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	/* gc_entries was charged in neigh_alloc(); undo on failure. */
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}
David Ahern58956312018-12-07 12:24:57 -0800703
/* Public entry point for creating a neighbour entry: no extra flags,
 * not exempt from garbage collection.  When @want_ref is true a
 * reference is taken on the returned entry for the caller.
 */
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710
Alexey Dobriyan01ccdf12017-09-23 23:03:04 +0300711static u32 pneigh_hash(const void *pkey, unsigned int key_len)
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700712{
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700713 u32 hash_val = *(u32 *)(pkey + key_len - 4);
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700714 hash_val ^= (hash_val >> 16);
715 hash_val ^= hash_val >> 8;
716 hash_val ^= hash_val >> 4;
717 hash_val &= PNEIGH_HASHMASK;
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900718 return hash_val;
719}
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700720
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900721static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
722 struct net *net,
723 const void *pkey,
Alexey Dobriyan01ccdf12017-09-23 23:03:04 +0300724 unsigned int key_len,
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900725 struct net_device *dev)
726{
727 while (n) {
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700728 if (!memcmp(n->key, pkey, key_len) &&
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900729 net_eq(pneigh_net(n), net) &&
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700730 (n->dev == dev || !n->dev))
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900731 return n;
732 n = n->next;
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700733 }
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900734 return NULL;
735}
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700736
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900737struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
738 struct net *net, const void *pkey, struct net_device *dev)
739{
Alexey Dobriyan01ccdf12017-09-23 23:03:04 +0300740 unsigned int key_len = tbl->key_len;
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900741 u32 hash_val = pneigh_hash(pkey, key_len);
742
743 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
744 net, pkey, key_len, dev);
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700745}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +0900746EXPORT_SYMBOL_GPL(__pneigh_lookup);
Pavel Emelyanovfa86d322008-03-24 14:48:59 -0700747
Eric W. Biederman426b5302008-01-24 00:13:18 -0800748struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
749 struct net *net, const void *pkey,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 struct net_device *dev, int creat)
751{
752 struct pneigh_entry *n;
Alexey Dobriyan01ccdf12017-09-23 23:03:04 +0300753 unsigned int key_len = tbl->key_len;
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900754 u32 hash_val = pneigh_hash(pkey, key_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755
756 read_lock_bh(&tbl->lock);
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900757 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
758 net, pkey, key_len, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 read_unlock_bh(&tbl->lock);
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900760
761 if (n || !creat)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 goto out;
763
Pavel Emelyanov4ae28942007-10-15 12:54:15 -0700764 ASSERT_RTNL();
765
Eric Dumazete195e9b2021-12-06 08:53:29 -0800766 n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 if (!n)
768 goto out;
769
Eric W. Biedermanefd7ef12015-03-11 23:04:08 -0500770 write_pnet(&n->net, net);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 memcpy(n->key, pkey, key_len);
772 n->dev = dev;
Eric Dumazet77a23b12021-12-04 20:22:08 -0800773 dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774
775 if (tbl->pconstructor && tbl->pconstructor(n)) {
Eric Dumazet77a23b12021-12-04 20:22:08 -0800776 dev_put_track(dev, &n->dev_tracker);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 kfree(n);
778 n = NULL;
779 goto out;
780 }
781
782 write_lock_bh(&tbl->lock);
783 n->next = tbl->phash_buckets[hash_val];
784 tbl->phash_buckets[hash_val] = n;
785 write_unlock_bh(&tbl->lock);
786out:
787 return n;
788}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +0900789EXPORT_SYMBOL(pneigh_lookup);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790
791
Eric W. Biederman426b5302008-01-24 00:13:18 -0800792int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 struct net_device *dev)
794{
795 struct pneigh_entry *n, **np;
Alexey Dobriyan01ccdf12017-09-23 23:03:04 +0300796 unsigned int key_len = tbl->key_len;
YOSHIFUJI Hideakibe01d652008-03-28 12:46:53 +0900797 u32 hash_val = pneigh_hash(pkey, key_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798
799 write_lock_bh(&tbl->lock);
800 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
801 np = &n->next) {
Eric W. Biederman426b5302008-01-24 00:13:18 -0800802 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +0900803 net_eq(pneigh_net(n), net)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 *np = n->next;
805 write_unlock_bh(&tbl->lock);
806 if (tbl->pdestructor)
807 tbl->pdestructor(n);
Eric Dumazet77a23b12021-12-04 20:22:08 -0800808 dev_put_track(n->dev, &n->dev_tracker);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700809 kfree(n);
810 return 0;
811 }
812 }
813 write_unlock_bh(&tbl->lock);
814 return -ENOENT;
815}
816
Wolfgang Bumiller53b76cd2018-04-12 10:46:55 +0200817static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
818 struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819{
Wolfgang Bumiller53b76cd2018-04-12 10:46:55 +0200820 struct pneigh_entry *n, **np, *freelist = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821 u32 h;
822
823 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
824 np = &tbl->phash_buckets[h];
825 while ((n = *np) != NULL) {
826 if (!dev || n->dev == dev) {
827 *np = n->next;
Wolfgang Bumiller53b76cd2018-04-12 10:46:55 +0200828 n->next = freelist;
829 freelist = n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 continue;
831 }
832 np = &n->next;
833 }
834 }
Wolfgang Bumiller53b76cd2018-04-12 10:46:55 +0200835 write_unlock_bh(&tbl->lock);
836 while ((n = freelist)) {
837 freelist = n->next;
838 n->next = NULL;
839 if (tbl->pdestructor)
840 tbl->pdestructor(n);
Eric Dumazet77a23b12021-12-04 20:22:08 -0800841 dev_put_track(n->dev, &n->dev_tracker);
Wolfgang Bumiller53b76cd2018-04-12 10:46:55 +0200842 kfree(n);
843 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 return -ENOENT;
845}
846
Denis V. Lunev06f05112008-01-24 00:30:58 -0800847static void neigh_parms_destroy(struct neigh_parms *parms);
848
849static inline void neigh_parms_put(struct neigh_parms *parms)
850{
Reshetova, Elena63439442017-06-30 13:07:56 +0300851 if (refcount_dec_and_test(&parms->refcnt))
Denis V. Lunev06f05112008-01-24 00:30:58 -0800852 neigh_parms_destroy(parms);
853}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854
855/*
856 * neighbour must already be out of the table;
857 *
858 */
859void neigh_destroy(struct neighbour *neigh)
860{
David Millerda6a8fa2011-07-25 00:01:38 +0000861 struct net_device *dev = neigh->dev;
862
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
864
865 if (!neigh->dead) {
Joe Perchese005d192012-05-16 19:58:40 +0000866 pr_warn("Destroying alive neighbour %p\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 dump_stack();
868 return;
869 }
870
871 if (neigh_del_timer(neigh))
Joe Perchese005d192012-05-16 19:58:40 +0000872 pr_warn("Impossible event\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873
Eric Dumazetc9ab4d82013-06-28 02:37:42 -0700874 write_lock_bh(&neigh->lock);
875 __skb_queue_purge(&neigh->arp_queue);
876 write_unlock_bh(&neigh->lock);
Eric Dumazet8b5c1712011-11-09 12:07:14 +0000877 neigh->arp_queue_len_bytes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
David S. Miller447f2192011-12-19 15:04:41 -0500879 if (dev->netdev_ops->ndo_neigh_destroy)
Jiri Pirko503eebc2016-07-05 11:27:37 +0200880 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
David S. Miller447f2192011-12-19 15:04:41 -0500881
Eric Dumazet85662c92021-12-04 20:22:07 -0800882 dev_put_track(dev, &neigh->dev_tracker);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 neigh_parms_put(neigh->parms);
884
Joe Perchesd5d427c2013-04-15 15:17:19 +0000885 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886
887 atomic_dec(&neigh->tbl->entries);
David Miller5b8b0062011-07-25 00:01:22 +0000888 kfree_rcu(neigh, rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +0900890EXPORT_SYMBOL(neigh_destroy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891
892/* Neighbour state is suspicious;
893 disable fast path.
894
895 Called with write_locked neigh.
896 */
897static void neigh_suspect(struct neighbour *neigh)
898{
Joe Perchesd5d427c2013-04-15 15:17:19 +0000899 neigh_dbg(2, "neigh %p is suspected\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900
901 neigh->output = neigh->ops->output;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902}
903
904/* Neighbour state is OK;
905 enable fast path.
906
907 Called with write_locked neigh.
908 */
909static void neigh_connect(struct neighbour *neigh)
910{
Joe Perchesd5d427c2013-04-15 15:17:19 +0000911 neigh_dbg(2, "neigh %p is connected\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912
913 neigh->output = neigh->ops->connected_output;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914}
915
Eric Dumazete4c4e442009-07-30 03:15:07 +0000916static void neigh_periodic_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917{
Eric Dumazete4c4e442009-07-30 03:15:07 +0000918 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
Eric Dumazet767e97e2010-10-06 17:49:21 -0700919 struct neighbour *n;
920 struct neighbour __rcu **np;
Eric Dumazete4c4e442009-07-30 03:15:07 +0000921 unsigned int i;
Eric Dumazetd6bf7812010-10-04 06:15:44 +0000922 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923
924 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
925
Eric Dumazete4c4e442009-07-30 03:15:07 +0000926 write_lock_bh(&tbl->lock);
Eric Dumazetd6bf7812010-10-04 06:15:44 +0000927 nht = rcu_dereference_protected(tbl->nht,
928 lockdep_is_held(&tbl->lock));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929
930 /*
931 * periodically recompute ReachableTime from random function
932 */
933
Eric Dumazete4c4e442009-07-30 03:15:07 +0000934 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935 struct neigh_parms *p;
Eric Dumazete4c4e442009-07-30 03:15:07 +0000936 tbl->last_rand = jiffies;
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +0100937 list_for_each_entry(p, &tbl->parms_list, list)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938 p->reachable_time =
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100939 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 }
941
Duan Jiongfeff9ab2014-02-27 17:14:41 +0800942 if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
943 goto out;
944
David S. Millercd089332011-07-11 01:28:12 -0700945 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
Eric Dumazetd6bf7812010-10-04 06:15:44 +0000946 np = &nht->hash_buckets[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947
Eric Dumazet767e97e2010-10-06 17:49:21 -0700948 while ((n = rcu_dereference_protected(*np,
949 lockdep_is_held(&tbl->lock))) != NULL) {
Eric Dumazete4c4e442009-07-30 03:15:07 +0000950 unsigned int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951
Eric Dumazete4c4e442009-07-30 03:15:07 +0000952 write_lock(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700953
Eric Dumazete4c4e442009-07-30 03:15:07 +0000954 state = n->nud_state;
Roopa Prabhu9ce33e42018-04-24 13:49:34 -0700955 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
956 (n->flags & NTF_EXT_LEARNED)) {
Eric Dumazete4c4e442009-07-30 03:15:07 +0000957 write_unlock(&n->lock);
958 goto next_elt;
959 }
960
961 if (time_before(n->used, n->confirmed))
962 n->used = n->confirmed;
963
Reshetova, Elena9f237432017-06-30 13:07:55 +0300964 if (refcount_read(&n->refcnt) == 1 &&
Eric Dumazete4c4e442009-07-30 03:15:07 +0000965 (state == NUD_FAILED ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100966 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
Eric Dumazete4c4e442009-07-30 03:15:07 +0000967 *np = n->next;
David Ahern58956312018-12-07 12:24:57 -0800968 neigh_mark_dead(n);
Eric Dumazete4c4e442009-07-30 03:15:07 +0000969 write_unlock(&n->lock);
970 neigh_cleanup_and_release(n);
971 continue;
972 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973 write_unlock(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
975next_elt:
Eric Dumazete4c4e442009-07-30 03:15:07 +0000976 np = &n->next;
977 }
978 /*
979 * It's fine to release lock here, even if hash table
980 * grows while we are preempted.
981 */
982 write_unlock_bh(&tbl->lock);
983 cond_resched();
984 write_lock_bh(&tbl->lock);
Michel Machado84338a62012-02-21 16:04:13 -0500985 nht = rcu_dereference_protected(tbl->nht,
986 lockdep_is_held(&tbl->lock));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987 }
YOSHIFUJI Hideaki / 吉藤英明27246802013-01-22 05:20:05 +0000988out:
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100989 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
990 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
991 * BASE_REACHABLE_TIME.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 */
viresh kumarf6180022014-01-22 12:23:33 +0530993 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
Jiri Pirko1f9248e2013-12-07 19:26:53 +0100994 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
Eric Dumazete4c4e442009-07-30 03:15:07 +0000995 write_unlock_bh(&tbl->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996}
997
998static __inline__ int neigh_max_probes(struct neighbour *n)
999{
1000 struct neigh_parms *p = n->parms;
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09001001 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1002 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1003 NEIGH_VAR(p, MCAST_PROBES));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004}
1005
Timo Teras5ef12d92009-06-11 04:16:28 -07001006static void neigh_invalidate(struct neighbour *neigh)
Eric Dumazet0a141502010-03-09 19:40:54 +00001007 __releases(neigh->lock)
1008 __acquires(neigh->lock)
Timo Teras5ef12d92009-06-11 04:16:28 -07001009{
1010 struct sk_buff *skb;
1011
1012 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
Joe Perchesd5d427c2013-04-15 15:17:19 +00001013 neigh_dbg(2, "neigh %p is failed\n", neigh);
Timo Teras5ef12d92009-06-11 04:16:28 -07001014 neigh->updated = jiffies;
1015
1016 /* It is very thin place. report_unreachable is very complicated
1017 routine. Particularly, it can hit the same neighbour entry!
1018
1019 So that, we try to be accurate and avoid dead loop. --ANK
1020 */
1021 while (neigh->nud_state == NUD_FAILED &&
1022 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1023 write_unlock(&neigh->lock);
1024 neigh->ops->error_report(neigh, skb);
1025 write_lock(&neigh->lock);
1026 }
Eric Dumazetc9ab4d82013-06-28 02:37:42 -07001027 __skb_queue_purge(&neigh->arp_queue);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001028 neigh->arp_queue_len_bytes = 0;
Timo Teras5ef12d92009-06-11 04:16:28 -07001029}
1030
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001031static void neigh_probe(struct neighbour *neigh)
1032 __releases(neigh->lock)
1033{
Hannes Frederic Sowa4ed377e2013-09-21 06:32:34 +02001034 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001035 /* keep skb alive even if arp_queue overflows */
1036 if (skb)
Martin Zhang19125c12015-11-17 20:49:30 +08001037 skb = skb_clone(skb, GFP_ATOMIC);
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001038 write_unlock(&neigh->lock);
Eric Dumazet48481c82017-03-23 12:39:21 -07001039 if (neigh->ops->solicit)
1040 neigh->ops->solicit(neigh, skb);
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001041 atomic_inc(&neigh->probes);
Yang Wei87fff3ca2019-01-17 23:11:30 +08001042 consume_skb(skb);
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001043}
1044
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045/* Called when a timer expires for a neighbour entry. */
1046
Kees Cooke99e88a2017-10-16 14:43:17 -07001047static void neigh_timer_handler(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048{
1049 unsigned long now, next;
Kees Cooke99e88a2017-10-16 14:43:17 -07001050 struct neighbour *neigh = from_timer(neigh, t, timer);
Eric Dumazet95c96172012-04-15 05:58:06 +00001051 unsigned int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 int notify = 0;
1053
1054 write_lock(&neigh->lock);
1055
1056 state = neigh->nud_state;
1057 now = jiffies;
1058 next = now + HZ;
1059
David S. Miller045f7b32011-11-01 17:45:55 -04001060 if (!(state & NUD_IN_TIMER))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062
1063 if (state & NUD_REACHABLE) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001064 if (time_before_eq(now,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 neigh->confirmed + neigh->parms->reachable_time)) {
Joe Perchesd5d427c2013-04-15 15:17:19 +00001066 neigh_dbg(2, "neigh %p is still alive\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 next = neigh->confirmed + neigh->parms->reachable_time;
1068 } else if (time_before_eq(now,
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001069 neigh->used +
1070 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
Joe Perchesd5d427c2013-04-15 15:17:19 +00001071 neigh_dbg(2, "neigh %p is delayed\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072 neigh->nud_state = NUD_DELAY;
YOSHIFUJI Hideaki955aaa22006-03-20 16:52:52 -08001073 neigh->updated = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 neigh_suspect(neigh);
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001075 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 } else {
Joe Perchesd5d427c2013-04-15 15:17:19 +00001077 neigh_dbg(2, "neigh %p is suspected\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 neigh->nud_state = NUD_STALE;
YOSHIFUJI Hideaki955aaa22006-03-20 16:52:52 -08001079 neigh->updated = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 neigh_suspect(neigh);
Tom Tucker8d717402006-07-30 20:43:36 -07001081 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 }
1083 } else if (state & NUD_DELAY) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001084 if (time_before_eq(now,
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001085 neigh->confirmed +
1086 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
Joe Perchesd5d427c2013-04-15 15:17:19 +00001087 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 neigh->nud_state = NUD_REACHABLE;
YOSHIFUJI Hideaki955aaa22006-03-20 16:52:52 -08001089 neigh->updated = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090 neigh_connect(neigh);
Tom Tucker8d717402006-07-30 20:43:36 -07001091 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 next = neigh->confirmed + neigh->parms->reachable_time;
1093 } else {
Joe Perchesd5d427c2013-04-15 15:17:19 +00001094 neigh_dbg(2, "neigh %p is probed\n", neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 neigh->nud_state = NUD_PROBE;
YOSHIFUJI Hideaki955aaa22006-03-20 16:52:52 -08001096 neigh->updated = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097 atomic_set(&neigh->probes, 0);
Erik Kline765c9c62015-05-18 19:44:41 +09001098 notify = 1;
Hangbin Liu19e16d22020-04-01 14:46:20 +08001099 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1100 HZ/100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 }
1102 } else {
1103 /* NUD_PROBE|NUD_INCOMPLETE */
Hangbin Liu19e16d22020-04-01 14:46:20 +08001104 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 }
1106
1107 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1108 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109 neigh->nud_state = NUD_FAILED;
1110 notify = 1;
Timo Teras5ef12d92009-06-11 04:16:28 -07001111 neigh_invalidate(neigh);
Duan Jiong5e2c21d2014-02-27 17:03:03 +08001112 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 }
1114
1115 if (neigh->nud_state & NUD_IN_TIMER) {
Hangbin Liu96d10d52020-05-28 15:15:13 +08001116 if (time_before(next, jiffies + HZ/100))
1117 next = jiffies + HZ/100;
Herbert Xu6fb99742005-10-23 16:37:48 +10001118 if (!mod_timer(&neigh->timer, next))
1119 neigh_hold(neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120 }
1121 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001122 neigh_probe(neigh);
David S. Miller9ff56602008-02-17 18:39:54 -08001123 } else {
David S. Miller69cc64d2008-02-11 21:45:44 -08001124out:
David S. Miller9ff56602008-02-17 18:39:54 -08001125 write_unlock(&neigh->lock);
1126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
Thomas Grafd961db32007-08-08 23:12:56 -07001128 if (notify)
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001129 neigh_update_notify(neigh, 0);
Thomas Grafd961db32007-08-08 23:12:56 -07001130
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001131 trace_neigh_timer_handler(neigh, 0);
1132
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 neigh_release(neigh);
1134}
1135
Daniel Borkmann4a81f6d2022-02-01 20:39:42 +01001136int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1137 const bool immediate_ok)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138{
1139 int rc;
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001140 bool immediate_probe = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
1142 write_lock_bh(&neigh->lock);
1143
1144 rc = 0;
1145 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1146 goto out_unlock_bh;
Julian Anastasov2c51a972015-06-16 22:56:39 +03001147 if (neigh->dead)
1148 goto out_dead;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001151 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1152 NEIGH_VAR(neigh->parms, APP_PROBES)) {
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001153 unsigned long next, now = jiffies;
1154
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001155 atomic_set(&neigh->probes,
1156 NEIGH_VAR(neigh->parms, UCAST_PROBES));
Lorenzo Bianconi071c3792019-07-14 23:36:11 +02001157 neigh_del_timer(neigh);
Daniel Borkmann4a81f6d2022-02-01 20:39:42 +01001158 neigh->nud_state = NUD_INCOMPLETE;
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001159 neigh->updated = now;
Daniel Borkmann4a81f6d2022-02-01 20:39:42 +01001160 if (!immediate_ok) {
1161 next = now + 1;
1162 } else {
1163 immediate_probe = true;
1164 next = now + max(NEIGH_VAR(neigh->parms,
1165 RETRANS_TIME),
1166 HZ / 100);
1167 }
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001168 neigh_add_timer(neigh, next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 } else {
1170 neigh->nud_state = NUD_FAILED;
YOSHIFUJI Hideaki955aaa22006-03-20 16:52:52 -08001171 neigh->updated = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 write_unlock_bh(&neigh->lock);
1173
Wei Yongjunf3fbbe02009-02-25 00:37:32 +00001174 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 return 1;
1176 }
1177 } else if (neigh->nud_state & NUD_STALE) {
Joe Perchesd5d427c2013-04-15 15:17:19 +00001178 neigh_dbg(2, "neigh %p is delayed\n", neigh);
Lorenzo Bianconi071c3792019-07-14 23:36:11 +02001179 neigh_del_timer(neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 neigh->nud_state = NUD_DELAY;
YOSHIFUJI Hideaki955aaa22006-03-20 16:52:52 -08001181 neigh->updated = jiffies;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001182 neigh_add_timer(neigh, jiffies +
1183 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 }
1185
1186 if (neigh->nud_state == NUD_INCOMPLETE) {
1187 if (skb) {
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001188 while (neigh->arp_queue_len_bytes + skb->truesize >
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001189 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 struct sk_buff *buff;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001191
David S. Millerf72051b2008-09-23 01:11:18 -07001192 buff = __skb_dequeue(&neigh->arp_queue);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001193 if (!buff)
1194 break;
1195 neigh->arp_queue_len_bytes -= buff->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 kfree_skb(buff);
Neil Horman9a6d2762008-07-16 20:50:49 -07001197 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 }
Eric Dumazeta4731132010-05-27 16:09:39 -07001199 skb_dst_force(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 __skb_queue_tail(&neigh->arp_queue, skb);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001201 neigh->arp_queue_len_bytes += skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 }
1203 rc = 1;
1204 }
1205out_unlock_bh:
Eric Dumazetcd28ca02011-08-09 08:15:58 +00001206 if (immediate_probe)
1207 neigh_probe(neigh);
1208 else
1209 write_unlock(&neigh->lock);
1210 local_bh_enable();
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001211 trace_neigh_event_send_done(neigh, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 return rc;
Julian Anastasov2c51a972015-06-16 22:56:39 +03001213
1214out_dead:
1215 if (neigh->nud_state & NUD_STALE)
1216 goto out_unlock_bh;
1217 write_unlock_bh(&neigh->lock);
1218 kfree_skb(skb);
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001219 trace_neigh_event_send_dead(neigh, 1);
Julian Anastasov2c51a972015-06-16 22:56:39 +03001220 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001222EXPORT_SYMBOL(__neigh_event_send);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223
David S. Millerf6b72b622011-07-14 07:53:20 -07001224static void neigh_update_hhs(struct neighbour *neigh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225{
1226 struct hh_cache *hh;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001227 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
Doug Kehn91a72a72010-07-14 18:02:16 -07001228 = NULL;
1229
1230 if (neigh->dev->header_ops)
1231 update = neigh->dev->header_ops->cache_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232
1233 if (update) {
David S. Millerf6b72b622011-07-14 07:53:20 -07001234 hh = &neigh->hh;
Eric Dumazetc305c6ae2019-11-07 18:29:11 -08001235 if (READ_ONCE(hh->hh_len)) {
Stephen Hemminger3644f0c2006-12-07 15:08:17 -08001236 write_seqlock_bh(&hh->hh_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 update(hh, neigh->dev, neigh->ha);
Stephen Hemminger3644f0c2006-12-07 15:08:17 -08001238 write_sequnlock_bh(&hh->hh_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 }
1240 }
1241}
1242
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243/* Generic update routine.
1244 -- lladdr is new lladdr or NULL, if it is not supplied.
1245 -- new is new state.
1246 -- flags
1247 NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1248 if it is different.
1249 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001250 lladdr instead of overriding it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 if it is different.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
Daniel Borkmann3dc20f42021-10-11 14:12:36 +02001253 NEIGH_UPDATE_F_USE means that the entry is user triggered.
Daniel Borkmann7482e382021-10-11 14:12:38 +02001254 NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001255 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 NTF_ROUTER flag.
1257 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
1258 a router.
1259
1260 Caller MUST hold reference count on the entry.
1261 */
David Ahern7a35a502018-12-05 20:02:29 -08001262static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1263 u8 new, u32 flags, u32 nlmsg_pid,
1264 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265{
Daniel Borkmann7482e382021-10-11 14:12:38 +02001266 bool gc_update = false, managed_update = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 int update_isrouter = 0;
Daniel Borkmann7482e382021-10-11 14:12:38 +02001268 struct net_device *dev;
1269 int err, notify = 0;
1270 u8 old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001272 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1273
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 write_lock_bh(&neigh->lock);
1275
1276 dev = neigh->dev;
1277 old = neigh->nud_state;
1278 err = -EPERM;
1279
Chinmay Agarwaleb4e8fa2021-01-27 22:24:54 +05301280 if (neigh->dead) {
1281 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1282 new = old;
1283 goto out;
1284 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001285 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 (old & (NUD_NOARP | NUD_PERMANENT)))
1287 goto out;
1288
Daniel Borkmann7482e382021-10-11 14:12:38 +02001289 neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1290 if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
Daniel Borkmann3dc20f42021-10-11 14:12:36 +02001291 new = old & ~NUD_PERMANENT;
1292 neigh->nud_state = new;
1293 err = 0;
1294 goto out;
1295 }
Roopa Prabhu9ce33e42018-04-24 13:49:34 -07001296
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 if (!(new & NUD_VALID)) {
1298 neigh_del_timer(neigh);
1299 if (old & NUD_CONNECTED)
1300 neigh_suspect(neigh);
David Ahern9c29a2f2018-12-11 18:57:21 -07001301 neigh->nud_state = new;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 notify = old & NUD_VALID;
Roopa Prabhud2fb4fb2018-10-20 18:09:31 -07001304 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
Timo Teras5ef12d92009-06-11 04:16:28 -07001305 (new & NUD_FAILED)) {
1306 neigh_invalidate(neigh);
1307 notify = 1;
1308 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 goto out;
1310 }
1311
1312 /* Compare new lladdr with cached one */
1313 if (!dev->addr_len) {
1314 /* First case: device needs no address. */
1315 lladdr = neigh->ha;
1316 } else if (lladdr) {
1317 /* The second case: if something is already cached
1318 and a new address is proposed:
1319 - compare new & old
1320 - if they are different, check override flag
1321 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001322 if ((old & NUD_VALID) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 !memcmp(lladdr, neigh->ha, dev->addr_len))
1324 lladdr = neigh->ha;
1325 } else {
1326 /* No address is supplied; if we know something,
1327 use it, otherwise discard the request.
1328 */
1329 err = -EINVAL;
David Ahern7a35a502018-12-05 20:02:29 -08001330 if (!(old & NUD_VALID)) {
1331 NL_SET_ERR_MSG(extack, "No link layer address given");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001333 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 lladdr = neigh->ha;
1335 }
1336
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001337 /* Update confirmed timestamp for neighbour entry after we
1338 * received ARP packet even if it doesn't change IP to MAC binding.
1339 */
1340 if (new & NUD_CONNECTED)
1341 neigh->confirmed = jiffies;
1342
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 /* If entry was valid and address is not changed,
1344 do not change entry state, if new one is STALE.
1345 */
1346 err = 0;
1347 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1348 if (old & NUD_VALID) {
1349 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1350 update_isrouter = 0;
1351 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1352 (old & NUD_CONNECTED)) {
1353 lladdr = neigh->ha;
1354 new = NUD_STALE;
1355 } else
1356 goto out;
1357 } else {
Julian Anastasov0e7bbcc2016-07-27 09:56:50 +03001358 if (lladdr == neigh->ha && new == NUD_STALE &&
1359 !(flags & NEIGH_UPDATE_F_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 new = old;
1361 }
1362 }
1363
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001364 /* Update timestamp only once we know we will make a change to the
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001365 * neighbour entry. Otherwise we risk to move the locktime window with
1366 * noop updates and ignore relevant ARP updates.
1367 */
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001368 if (new != old || lladdr != neigh->ha)
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001369 neigh->updated = jiffies;
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001370
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 if (new != old) {
1372 neigh_del_timer(neigh);
Erik Kline765c9c62015-05-18 19:44:41 +09001373 if (new & NUD_PROBE)
1374 atomic_set(&neigh->probes, 0);
Pavel Emelyanova43d8992007-12-20 15:49:05 -08001375 if (new & NUD_IN_TIMER)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001376 neigh_add_timer(neigh, (jiffies +
1377 ((new & NUD_REACHABLE) ?
David S. Miller667347f2005-09-27 12:07:44 -07001378 neigh->parms->reachable_time :
1379 0)));
David Ahern9c29a2f2018-12-11 18:57:21 -07001380 neigh->nud_state = new;
Bob Gilligan53385d22013-12-15 13:39:56 -08001381 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 }
1383
1384 if (lladdr != neigh->ha) {
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001385 write_seqlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 memcpy(&neigh->ha, lladdr, dev->addr_len);
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001387 write_sequnlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 neigh_update_hhs(neigh);
1389 if (!(new & NUD_CONNECTED))
1390 neigh->confirmed = jiffies -
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001391 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 }
1394 if (new == old)
1395 goto out;
1396 if (new & NUD_CONNECTED)
1397 neigh_connect(neigh);
1398 else
1399 neigh_suspect(neigh);
1400 if (!(old & NUD_VALID)) {
1401 struct sk_buff *skb;
1402
1403 /* Again: avoid dead loop if something went wrong */
1404
1405 while (neigh->nud_state & NUD_VALID &&
1406 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
David S. Miller69cce1d2011-07-17 23:09:49 -07001407 struct dst_entry *dst = skb_dst(skb);
1408 struct neighbour *n2, *n1 = neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 write_unlock_bh(&neigh->lock);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001410
1411 rcu_read_lock();
David S. Miller13a43d92012-07-02 22:15:37 -07001412
1413 /* Why not just use 'neigh' as-is? The problem is that
1414 * things such as shaper, eql, and sch_teql can end up
1415 * using alternative, different, neigh objects to output
1416 * the packet in the output path. So what we need to do
1417 * here is re-lookup the top-level neigh in the path so
1418 * we can reinject the packet there.
1419 */
1420 n2 = NULL;
Tong Zhud47ec7a2021-03-19 14:33:37 -04001421 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
David S. Miller13a43d92012-07-02 22:15:37 -07001422 n2 = dst_neigh_lookup_skb(dst, skb);
1423 if (n2)
1424 n1 = n2;
1425 }
David S. Miller8f40b162011-07-17 13:34:11 -07001426 n1->output(n1, skb);
David S. Miller13a43d92012-07-02 22:15:37 -07001427 if (n2)
1428 neigh_release(n2);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001429 rcu_read_unlock();
1430
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 write_lock_bh(&neigh->lock);
1432 }
Eric Dumazetc9ab4d82013-06-28 02:37:42 -07001433 __skb_queue_purge(&neigh->arp_queue);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001434 neigh->arp_queue_len_bytes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 }
1436out:
Roopa Prabhufc6e8072018-09-22 21:26:20 -07001437 if (update_isrouter)
1438 neigh_update_is_router(neigh, flags, &notify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 write_unlock_bh(&neigh->lock);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001440 if (((new ^ old) & NUD_PERMANENT) || gc_update)
David Ahern9c29a2f2018-12-11 18:57:21 -07001441 neigh_update_gc_list(neigh);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001442 if (managed_update)
1443 neigh_update_managed_list(neigh);
Tom Tucker8d717402006-07-30 20:43:36 -07001444 if (notify)
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001445 neigh_update_notify(neigh, nlmsg_pid);
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001446 trace_neigh_update_done(neigh, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 return err;
1448}
David Ahern7a35a502018-12-05 20:02:29 -08001449
/* Public wrapper around __neigh_update() for callers that have no
 * netlink extended-ack context to report errors through.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	/* Entry is being torn down; do not resurrect it. */
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	/* Only NUD_FAILED entries need to be pushed back into probing;
	 * other states are left untouched.
	 */
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	/* Arm the retransmit timer, clamping the delay to at least
	 * HZ/100 (10ms) in case RETRANS_TIME is configured very small.
	 */
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
1474
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1476 u8 *lladdr, void *saddr,
1477 struct net_device *dev)
1478{
1479 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1480 lladdr || !dev->addr_len);
1481 if (neigh)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001482 neigh_update(neigh, lladdr, NUD_STALE,
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001483 NEIGH_UPDATE_F_OVERRIDE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 return neigh;
1485}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001486EXPORT_SYMBOL(neigh_event_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
/* called with read_lock_bh(&n->lock); */
/* Fill the hardware-header cache (n->hh) via the device's header_ops.
 * hh->hh_len doubles as the "already initialized" flag, checked again
 * under the write lock.
 */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache *hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
1505
/* Slow and careful. */

/* Output path for entries that may still need resolution.  Resolves
 * first (neigh_event_send() == 0 means the entry is usable; otherwise
 * the skb has been taken over by the resolution machinery and rc stays
 * 0), then builds the link-layer header and transmits.
 */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		/* Populate the hardware-header cache once, if the device
		 * supports caching and it has not been filled yet.
		 */
		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		/* Build the link-layer header under the ha_lock seqlock,
		 * retrying if the address changed mid-copy.
		 */
		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541/* As fast as possible without hh cache */
1542
David S. Miller8f40b162011-07-17 13:34:11 -07001543int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001546 unsigned int seq;
David S. Miller8f40b162011-07-17 13:34:11 -07001547 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001549 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001550 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001551 seq = read_seqbegin(&neigh->ha_lock);
1552 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1553 neigh->ha, NULL, skb->len);
1554 } while (read_seqretry(&neigh->ha_lock, seq));
1555
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001557 err = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 else {
1559 err = -EINVAL;
1560 kfree_skb(skb);
1561 }
1562 return err;
1563}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001564EXPORT_SYMBOL(neigh_connected_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565
/* Output helper that ignores the neighbour entry entirely and queues
 * the skb directly on the device.
 */
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
1571
/* Periodic worker: probe every entry on the table's managed list, then
 * re-arm itself after DELAY_PROBE_TIME.
 */
static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	/* tbl->lock protects the managed_list walk. */
	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send_probe(neigh, NULL, false);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
	write_unlock_bh(&tbl->lock);
}
1585
/* Proxy-queue timer handler: hand every skb whose scheduled time has
 * arrived to tbl->proxy_redo (or drop it if the device went down), and
 * re-arm the timer for the earliest remaining entry.
 */
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		/* Time remaining until this skb is due; <= 0 means now. */
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			/* Drop the reference taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1619
/* Queue an skb for delayed proxy processing.  The delivery time is
 * randomized over [0, PROXY_DELAY) via prandom_u32_max().
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	/* Drop the packet when the proxy queue is already full. */
	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		/* Keep the earlier of the pending expiry and ours. */
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	/* Hold the device; released in neigh_proxy_process(). */
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07001647static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
Eric W. Biederman426b5302008-01-24 00:13:18 -08001648 struct net *net, int ifindex)
1649{
1650 struct neigh_parms *p;
1651
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001652 list_for_each_entry(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001653 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
Gao feng170d6f92013-06-20 10:01:33 +08001654 (!p->dev && !ifindex && net_eq(net, &init_net)))
Eric W. Biederman426b5302008-01-24 00:13:18 -08001655 return p;
1656 }
1657
1658 return NULL;
1659}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
/* Allocate per-device neighbour parameters for @dev, cloned from the
 * table's defaults, and link them into @tbl's parms list.  Returns the
 * new parms (refcount 1) or NULL on allocation failure or driver veto.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	/* Start from a copy of the table's default parameters. */
	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		/* Give the driver a chance to adjust or veto the parms. */
		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put_track(dev, &p->dev_tracker);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
/* RCU callback: drop a reference on the parms once the grace period
 * following neigh_parms_release() has elapsed.
 */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1702
/* Unlink @parms from @tbl, mark it dead, and schedule the reference
 * drop after an RCU grace period.  The table's built-in default parms
 * (and NULL) are silently ignored.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put_track(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
/* Free the parms structure itself. */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1720
/* Lockdep class for the per-table proxy queue lock. */
static struct lock_class_key neigh_table_proxy_queue_class;

/* Registered tables, indexed by NEIGH_*_TABLE (see neigh_find_table()). */
static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1724
/* Initialize @tbl and register it in neigh_tables[] at slot @index.
 * Panics on allocation failure (hashes, per-cpu stats, proc entry).
 */
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	/* The table's built-in default parms head the parms list. */
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	/* Start with a small (2^3 buckets) neighbour hash. */
	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	/* Entry size defaults to the key-inclusive aligned size unless
	 * the protocol preset one; then it only has to stay aligned.
	 */
	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	/* Kick off periodic GC and managed-entry probing workers. */
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
/* Unregister and tear down @tbl: stop its workers and timer, flush the
 * queues and entries, then free the hashes and statistics.  Always
 * returns 0.
 */
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->managed_work);
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	/* All entries should be gone by now; anything left is a leak. */
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	/* Free the hash after the grace period; readers may still walk it. */
	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
WANG Congd7480fd2014-11-10 15:59:36 -08001811static struct neigh_table *neigh_find_table(int family)
1812{
1813 struct neigh_table *tbl = NULL;
1814
1815 switch (family) {
1816 case AF_INET:
1817 tbl = neigh_tables[NEIGH_ARP_TABLE];
1818 break;
1819 case AF_INET6:
1820 tbl = neigh_tables[NEIGH_ND_TABLE];
1821 break;
1822 case AF_DECnet:
1823 tbl = neigh_tables[NEIGH_DN_TABLE];
1824 break;
1825 }
1826
1827 return tbl;
1828}
1829
/* Netlink attribute policy for NDA_* attributes, shared by the
 * neighbour add/delete/dump handlers.  Strict validation starts at
 * NDA_NH_ID; NDA_FLAGS_EXT bits are restricted to NTF_EXT_MASK.
 */
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
1846
/* RTM_DELNEIGH handler: remove the neighbour (or proxy) entry described
 * by the netlink request.  Runs under RTNL.
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	/* The destination must be at least as long as the table's key. */
	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	/* Proxy entries are handled by the separate pneigh machinery. */
	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Administratively force the entry to NUD_FAILED, then drop the
	 * lookup reference and unlink it from the hash under tbl->lock.
	 */
	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
1911
/* RTM_NEWNEIGH handler: create or update a neighbour (or proxy) entry.
 *
 * Runs under RTNL (ASSERT_RTNL below).  The destination comes from
 * NDA_DST, the link-layer address from NDA_LLADDR, and extended NTF_*
 * flags from NDA_FLAGS_EXT are folded into ndm_flags above bit 7.
 *
 * Returns 0 on success or a negative errno; extack carries a message
 * for most validation failures.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	/* Admin updates may override existing state by default; the
	 * OVERRIDE bits are dropped later unless NLM_F_REPLACE is set.
	 */
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	u32 ndm_flags;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	ndm_flags = ndm->ndm_flags;
	if (tb[NDA_FLAGS_EXT]) {
		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

		/* Legacy 8-bit flags plus the extended flags must still
		 * fit into neigh->flags; catch widening mistakes at
		 * compile time.
		 */
		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
			      hweight32(NTF_EXT_MASK)));
		ndm_flags |= (ext << NTF_EXT_SHIFT);
	}
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		/* A supplied lladdr must be at least addr_len bytes. */
		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
	if (ndm_flags & NTF_PROXY) {
		/* Proxy entries live in the pneigh hash, not the main
		 * neighbour cache; NTF_MANAGED makes no sense here.
		 */
		struct pneigh_entry *pn;

		if (ndm_flags & NTF_MANAGED) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
			goto out;
		}

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	/* Let the table veto entries on unsuitable devices. */
	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		/* Permanent or externally learned entries are exempt
		 * from garbage collection.
		 */
		bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
		bool exempt_from_gc = ndm_permanent ||
				      ndm_flags & NTF_EXT_LEARNED;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}
		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
			err = -EINVAL;
			goto out;
		}

		neigh = ___neigh_create(tbl, dst, dev,
					ndm_flags &
					(NTF_EXT_LEARNED | NTF_MANAGED),
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		/* Without NLM_F_REPLACE an existing entry must not be
		 * forcibly overridden.
		 */
		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;
	/* Translate the remaining NTF_* request flags into update flags. */
	if (ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
	if (ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm_flags & NTF_MANAGED)
		flags |= NEIGH_UPDATE_F_MANAGED;
	if (ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);
	/* NTF_USE/NTF_MANAGED request an immediate resolution attempt. */
	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);
out:
	return err;
}
2063
/* Emit one NDTA_PARMS nest describing @parms (a table's default tunables
 * or a per-device set).  Returns the nest length on success, -ENOBUFS if
 * the nest cannot be started, or -EMSGSIZE if the skb runs out of room
 * (the partially written nest is cancelled).
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	/* NDTPA_IFINDEX is only emitted for per-device parms (dev set).
	 * All puts are ||-chained so the first failure aborts the fill.
	 */
	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
2111
/* Build one RTM_NEWNEIGHTBL message describing table @tbl: name, GC
 * thresholds/interval, hash configuration, aggregated per-CPU statistics
 * and the table's default parms nest.  Returns 0 on success or -EMSGSIZE
 * if the skb cannot hold the message (the partial message is cancelled).
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Hold tbl->lock so thresholds, interval and parms are read as a
	 * consistent set (writers take it in neightbl_set()).
	 */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len = tbl->key_len,
			.ndtc_entry_size = tbl->entry_size,
			.ndtc_entries = atomic_read(&tbl->entries),
			.ndtc_last_flush = jiffies_to_msecs(flush_delta),
			.ndtc_last_rand = jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
		};

		/* The hash table itself is RCU-managed; snapshot its
		 * first seed and mask under the RCU read lock.
		 */
		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* Sum the per-CPU counters into a single ndt_stats blob. */
		for_each_possible_cpu(cpu) {
			struct neigh_statistics *st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs += st->allocs;
			ndst.ndts_destroys += st->destroys;
			ndst.ndts_hash_grows += st->hash_grows;
			ndst.ndts_res_failed += st->res_failed;
			ndst.ndts_lookups += st->lookups;
			ndst.ndts_hits += st->hits;
			ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs += st->forced_gc_runs;
			ndst.ndts_table_fulls += st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	/* The table's default parms must never be bound to a device. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2200
/* Build an RTM_NEWNEIGHTBL message containing only the table name and a
 * single parms nest (@parms), used when dumping per-device parms sets.
 * Returns 0 on success or -EMSGSIZE (the partial message is cancelled).
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Read-lock the table so @parms is stable while it is dumped. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002233
/* Netlink policy for the top-level RTM_SETNEIGHTBL attributes. */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
2242
/* Netlink policy for the attributes nested inside NDTA_PARMS. */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
2259
/* RTM_SETNEIGHTBL handler: update a neighbour table's tunables and/or one
 * of its parms sets.  The table is selected by NDTA_NAME (optionally
 * constrained by family); per-parms changes are nested in NDTA_PARMS and
 * selected by NDTPA_IFINDEX (0 means the table default).  Table-wide GC
 * knobs (thresholds, interval) may only be changed from init_net.
 * Returns 0 on success or a negative errno.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	/* The table name is the mandatory selector. */
	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	/* Scan the fixed table slots for a name (and family) match. */
	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every NDTPA_* attribute that is present to @p. */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				/* deprecated packet count: convert to an
				 * approximate byte limit
				 */
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				/* let interested parties (e.g. offload
				 * drivers) know the probe delay changed
				 */
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	/* Table-wide GC settings are restricted to the initial netns. */
	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
2421
David Ahern9632d472018-10-07 20:16:37 -07002422static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2423 struct netlink_ext_ack *extack)
2424{
2425 struct ndtmsg *ndtm;
2426
2427 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2428 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2429 return -EINVAL;
2430 }
2431
2432 ndtm = nlmsg_data(nlh);
2433 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2434 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2435 return -EINVAL;
2436 }
2437
2438 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2439 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2440 return -EINVAL;
2441 }
2442
2443 return 0;
2444}
2445
/* RTM_GETNEIGHTBL dump handler: for every table matching the requested
 * family, emit the table message followed by one message per per-device
 * parms set belonging to this netns.  cb->args[0] (table index) and
 * cb->args[1] (parms index) record the resume point between dump
 * continuations.  Returns skb->len (standard netlink dump convention).
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		/* Skip the default parms entry (already included in the
		 * table message above) and walk only the per-device ones.
		 */
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			/* Fast-forward past entries dumped previously. */
			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		/* Resume offset only applies to the first table dumped. */
		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506
/* Build one neighbour message (RTM_NEWNEIGH/RTM_DELNEIGH) for @neigh.
 * Flags above the legacy 8-bit ndm_flags field are reported through the
 * NDA_FLAGS_EXT attribute.  Returns 0 on success or -EMSGSIZE if the skb
 * cannot hold the message (the partial message is cancelled).
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	u32 neigh_flags, neigh_flags_ext;
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* Split neigh->flags into the legacy 8-bit portion and the
	 * extended flags carried separately.
	 */
	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
	neigh_flags = neigh->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = neigh->ops->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh_flags;
	ndm->ndm_type = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	/* Snapshot state, hardware address and timestamps under the
	 * neighbour lock so they are mutually consistent.
	 */
	read_lock_bh(&neigh->lock);
	ndm->ndm_state = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	/* Exclude the reference held for this dump itself. */
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* Optional attributes, only emitted when non-zero. */
	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2568
/* Build one neighbour message for a proxy entry @pn.  Proxy entries have
 * no resolution state, so NTF_PROXY is forced into ndm_flags and the
 * state is reported as NUD_NONE.  Returns 0 on success or -EMSGSIZE
 * (the partial message is cancelled).
 */
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	u32 neigh_flags, neigh_flags_ext;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* Split pn->flags into the legacy 8-bit field and the extended
	 * flags reported via NDA_FLAGS_EXT.
	 */
	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
	neigh_flags = pn->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = tbl->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh_flags | NTF_PROXY;
	ndm->ndm_type = RTN_UNICAST;
	/* A proxy entry may be device-less (wildcard); report ifindex 0. */
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2608
/* Propagate a neighbour state change: raise NETEVENT_NEIGH_UPDATE for
 * in-kernel listeners, then multicast an RTM_NEWNEIGH message to
 * userspace (@nlmsg_pid identifies the requesting socket so it can
 * recognise its own updates).
 */
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
David Ahern21fdd092015-09-29 09:32:03 -07002615static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2616{
2617 struct net_device *master;
2618
2619 if (!master_idx)
2620 return false;
2621
Eric Dumazetaab456d2018-10-26 09:33:27 -07002622 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
Lahav Schlesingerd3432bf2021-08-10 09:06:58 +00002623
2624 /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
2625 * invalid value for ifindex to denote "no master".
2626 */
2627 if (master_idx == -1)
2628 return !!master;
2629
David Ahern21fdd092015-09-29 09:32:03 -07002630 if (!master || master->ifindex != master_idx)
2631 return true;
2632
2633 return false;
2634}
2635
David Ahern16660f02015-10-03 11:43:46 -07002636static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2637{
Eric Dumazetaab456d2018-10-26 09:33:27 -07002638 if (filter_idx && (!dev || dev->ifindex != filter_idx))
David Ahern16660f02015-10-03 11:43:46 -07002639 return true;
2640
2641 return false;
2642}
2643
/* Dump-request filters parsed from the NDA_MASTER / NDA_IFINDEX
 * attributes of an RTM_GETNEIGH dump; zero means "not specified".
 */
struct neigh_dump_filter {
	int master_idx;	/* keep only entries whose device has this master */
	int dev_idx;	/* keep only entries on this device */
};
2648
/* Dump the neighbour entries of @tbl into @skb, resuming from the
 * position saved in cb->args[1] (hash bucket) and cb->args[2] (index
 * within the bucket chain).  Entries from other netns or rejected by
 * @filter are skipped but still counted so resume indices stay stable.
 * Returns skb->len on completion or -1 when the skb filled up (saved
 * args let the next dump call continue where this one stopped).
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	/* The hash table is walked under RCU; it may be resized
	 * concurrently, which is why the bucket/index resume point is
	 * best-effort rather than exact.
	 */
	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;	/* new bucket: restart intra-bucket index */
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;	/* skb full: save position and stop */
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
2695
/* Dump the proxy-neighbour entries of @tbl, resuming from cb->args[3]
 * (bucket) and cb->args[4] (index within the bucket).  The pneigh hash
 * is protected by tbl->lock rather than RCU.  Returns skb->len when
 * finished or -1 when the skb filled up.
 */
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;	/* new bucket: restart intra-bucket index */
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH, flags, tbl) < 0) {
				/* skb full: drop the lock before bailing out */
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;

}
2740
David Ahern51183d22018-10-07 20:16:36 -07002741static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2742 bool strict_check,
2743 struct neigh_dump_filter *filter,
2744 struct netlink_ext_ack *extack)
2745{
2746 struct nlattr *tb[NDA_MAX + 1];
2747 int err, i;
2748
2749 if (strict_check) {
2750 struct ndmsg *ndm;
2751
2752 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2753 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2754 return -EINVAL;
2755 }
2756
2757 ndm = nlmsg_data(nlh);
2758 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
David Ahernc0fde872018-12-19 16:54:38 -08002759 ndm->ndm_state || ndm->ndm_type) {
David Ahern51183d22018-10-07 20:16:36 -07002760 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2761 return -EINVAL;
2762 }
2763
David Ahernc0fde872018-12-19 16:54:38 -08002764 if (ndm->ndm_flags & ~NTF_PROXY) {
2765 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2766 return -EINVAL;
2767 }
2768
Johannes Berg8cb08172019-04-26 14:07:28 +02002769 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2770 tb, NDA_MAX, nda_policy,
2771 extack);
David Ahern51183d22018-10-07 20:16:36 -07002772 } else {
Johannes Berg8cb08172019-04-26 14:07:28 +02002773 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2774 NDA_MAX, nda_policy, extack);
David Ahern51183d22018-10-07 20:16:36 -07002775 }
2776 if (err < 0)
2777 return err;
2778
2779 for (i = 0; i <= NDA_MAX; ++i) {
2780 if (!tb[i])
2781 continue;
2782
2783 /* all new attributes should require strict_check */
2784 switch (i) {
2785 case NDA_IFINDEX:
David Ahern51183d22018-10-07 20:16:36 -07002786 filter->dev_idx = nla_get_u32(tb[i]);
2787 break;
2788 case NDA_MASTER:
David Ahern51183d22018-10-07 20:16:36 -07002789 filter->master_idx = nla_get_u32(tb[i]);
2790 break;
2791 default:
2792 if (strict_check) {
2793 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2794 return -EINVAL;
2795 }
2796 }
2797 }
2798
2799 return 0;
2800}
2801
/* RTM_GETNEIGH dump handler: walk every neighbour table, dispatching
 * to the proxy or normal per-table dump depending on NTF_PROXY in the
 * request header.  cb->args[0] records the table where the previous
 * call stopped; cb->args[1..] hold per-table resume state.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	/* Legacy (non-strict) requests tolerate validation failure. */
	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			/* entering a new table: clear per-table resume state */
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
2847
/* Validate an RTM_GETNEIGH (non-dump) request under strict parsing and
 * extract the lookup parameters: the table (resolved from ndm_family),
 * the key address (NDA_DST, must match the table's key length), the
 * ifindex and the header flags.  Returns 0 or a negative errno with an
 * extack message; -EAFNOSUPPORT for an unknown address family.
 */
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	/* NTF_PROXY is the only flag a get request may carry. */
	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			/* the key must be exactly the table's address size */
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
2907
2908static inline size_t neigh_nlmsg_size(void)
2909{
2910 return NLMSG_ALIGN(sizeof(struct ndmsg))
2911 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2912 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2913 + nla_total_size(sizeof(struct nda_cacheinfo))
2914 + nla_total_size(4) /* NDA_PROBES */
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002915 + nla_total_size(4) /* NDA_FLAGS_EXT */
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002916 + nla_total_size(1); /* NDA_PROTOCOL */
2917}
2918
2919static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2920 u32 pid, u32 seq)
2921{
2922 struct sk_buff *skb;
2923 int err = 0;
2924
2925 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2926 if (!skb)
2927 return -ENOBUFS;
2928
2929 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2930 if (err) {
2931 kfree_skb(skb);
2932 goto errout;
2933 }
2934
2935 err = rtnl_unicast(skb, net, pid);
2936errout:
2937 return err;
2938}
2939
2940static inline size_t pneigh_nlmsg_size(void)
2941{
2942 return NLMSG_ALIGN(sizeof(struct ndmsg))
Colin Ian King463561e2018-12-20 16:50:50 +00002943 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002944 + nla_total_size(4) /* NDA_FLAGS_EXT */
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002945 + nla_total_size(1); /* NDA_PROTOCOL */
2946}
2947
2948static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2949 u32 pid, u32 seq, struct neigh_table *tbl)
2950{
2951 struct sk_buff *skb;
2952 int err = 0;
2953
2954 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2955 if (!skb)
2956 return -ENOBUFS;
2957
2958 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2959 if (err) {
2960 kfree_skb(skb);
2961 goto errout;
2962 }
2963
2964 err = rtnl_unicast(skb, net, pid);
2965errout:
2966 return err;
2967}
2968
/* RTM_GETNEIGH doit handler: look up a single neighbour (or, with
 * NTF_PROXY, a proxy entry) and unicast it back to the requester.
 * Runs under rtnl, so __dev_get_by_index() needs no extra reference.
 */
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		/* last arg 0: lookup only, never create an entry */
		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	/* neigh_lookup() took a reference; drop it once the reply is sent */
	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
3029
/* Invoke @cb(entry, @cookie) for every neighbour in @tbl.  Runs under
 * rcu_read_lock_bh() plus tbl->lock taken for read so the hash table
 * cannot be resized during the walk; @cb must not sleep or modify the
 * table.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
3050EXPORT_SYMBOL(neigh_for_each);
3051
/* The tbl->lock must be held as a writer and BH disabled. */
/* Walk every hash chain and unlink each neighbour for which @cb
 * returns non-zero.  A victim is detached from the chain and marked
 * dead while its own lock is held, then released (possibly freeing it)
 * once that lock is dropped.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* splice n out of the chain; readers under
				 * RCU may still see it until a grace period
				 */
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
3085EXPORT_SYMBOL(__neigh_for_each_release);
3086
/* Transmit @skb through the neighbour layer of table @index on @dev.
 * For NEIGH_LINK_TABLE the L2 header is built directly from @addr and
 * the skb queued for transmission.  For real tables the neighbour
 * entry is looked up under RCU (created on demand) and its output
 * method invoked.  @skb is consumed (freed) on error.  Returns 0 or a
 * negative errno; -EAFNOSUPPORT for an unknown table index.
 */
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		/* ARP keys are 32-bit IPv4 addresses; use the
		 * specialised lookup for that table.
		 */
		if (index == NEIGH_ARP_TABLE) {
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			/* must drop RCU before freeing the skb */
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
3129EXPORT_SYMBOL(neigh_xmit);
3130
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131#ifdef CONFIG_PROC_FS
3132
/* Return the first neighbour (current netns only) for the /proc
 * seq_file walk, honouring NEIGH_SEQ_SKIP_NOARP and giving any
 * per-protocol sub-iterator first refusal on each entry.  Records the
 * bucket reached in state->bucket.  Caller holds rcu_read_lock_bh()
 * via neigh_seq_start().
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				/* sub-iterator rejects entries by returning NULL */
				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
3171
/* Successor of @n in the seq_file walk, applying the same netns,
 * sub-iterator and NOARP filtering as neigh_get_first() and advancing
 * to later hash buckets as chains are exhausted.  When @pos is
 * non-NULL it is decremented for the entry consumed.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	/* let a sub-iterator keep yielding positions within @n first */
	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
3219
/* Position the neighbour walk *pos entries past the start; *pos is
 * decremented as entries are consumed, so a non-zero remainder means
 * there were fewer entries and NULL is returned.
 */
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
3234
/* First proxy-neighbour entry in the current netns; records the bucket
 * reached in state->bucket and flags the walk as being in the pneigh
 * phase.  Caller holds tbl->lock via neigh_seq_start().
 */
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		/* skip entries belonging to other network namespaces */
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
3255
/* Successor of @pn in the current netns, advancing to later hash
 * buckets as needed.  When @pos is non-NULL it is decremented for the
 * entry consumed.
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	/* step past @pn, skipping foreign-netns entries */
	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
3283
/* Position the proxy-neighbour walk *pos entries past the start;
 * mirrors neigh_get_idx(): *pos is decremented per entry and NULL is
 * returned when there were fewer entries.
 */
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
3298
/* Index into the combined walk: neighbour entries first, then (unless
 * NEIGH_SEQ_NEIGH_ONLY) proxy entries.  Works on a local copy of *pos
 * so the caller's position is left untouched.
 */
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
3311
/* Begin a /proc neighbour-table walk: takes rcu_read_lock_bh() and
 * then tbl->lock for read, pinning the hash table until
 * neigh_seq_stop().  Returns SEQ_START_TOKEN for position 0, otherwise
 * the entry at *pos (or NULL when past the end).
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	/* IS_PNEIGH is internal walk state, never caller-supplied */
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
3328EXPORT_SYMBOL(neigh_seq_start);
3329
/* Advance the combined walk: neighbour entries first, then proxy
 * entries once the neighbour phase is exhausted (unless
 * NEIGH_SEQ_NEIGH_ONLY).  Always increments *pos.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* neighbour phase done: fall through to proxy entries */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
3355EXPORT_SYMBOL(neigh_seq_next);
3356
/* End the walk begun by neigh_seq_start(): release tbl->lock and
 * rcu_read_lock_bh() in reverse acquisition order.
 */
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
3367EXPORT_SYMBOL(neigh_seq_stop);
3368
3369/* statistics via seq_file */
3370
/* /proc/net/stat iterator start: position 0 is the header line
 * (SEQ_START_TOKEN); positions 1..N map to CPU ids 0..N-1, returning
 * the first possible CPU's per-cpu statistics block at or after that
 * id.
 */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;	/* remember position as cpu id + 1 */
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
3387
/* Advance to the next possible CPU's statistics block; bumps *pos past
 * the last CPU when exhausted so seq_file sees a clean end of
 * sequence.
 */
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;	/* mark iteration complete */
	return NULL;
}
3402
/* seq_file ->stop: nothing to release, the start/next iterators above
 * take no locks.
 */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
3407
/* Emit one header line plus one line of hex counters per CPU for the
 * table's /proc/net/stat file.  The column order must stay in sync
 * with the header string (userspace parses this output).
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx "
			"%08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
3443
/* Iterator over SEQ_START_TOKEN plus each possible CPU's statistics;
 * backs the per-table /proc/net/stat files.
 */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450#endif /* CONFIG_PROC_FS */
3451
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003452static void __neigh_notify(struct neighbour *n, int type, int flags,
3453 u32 pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003455 struct net *net = dev_net(n->dev);
Thomas Graf8b8aec52006-08-07 17:56:37 -07003456 struct sk_buff *skb;
Thomas Grafb8673312006-08-15 00:33:14 -07003457 int err = -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
Thomas Graf339bf982006-11-10 14:10:15 -08003459 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
Thomas Graf8b8aec52006-08-07 17:56:37 -07003460 if (skb == NULL)
Thomas Grafb8673312006-08-15 00:33:14 -07003461 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003463 err = neigh_fill_info(skb, n, pid, 0, type, flags);
Patrick McHardy26932562007-01-31 23:16:40 -08003464 if (err < 0) {
3465 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3466 WARN_ON(err == -EMSGSIZE);
3467 kfree_skb(skb);
3468 goto errout;
3469 }
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08003470 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3471 return;
Thomas Grafb8673312006-08-15 00:33:14 -07003472errout:
3473 if (err < 0)
Eric W. Biederman426b5302008-01-24 00:13:18 -08003474 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
Thomas Grafb8673312006-08-15 00:33:14 -07003475}
3476
/* Ask userspace listeners on RTNLGRP_NEIGH to resolve neighbour @n by
 * emitting an RTM_GETNEIGH request message for it.
 */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482
3483#ifdef CONFIG_SYSCTL
/* Ceiling for the packet-count view of the unresolved queue (the legacy
 * "unres_qlen" sysctl): chosen so that converting back to bytes in
 * proc_unres_qlen() (count * SKB_TRUESIZE(ETH_FRAME_LEN)) cannot
 * overflow an int.
 */
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485
Joe Perchesfe2c6332013-06-11 23:04:25 -07003486static int proc_unres_qlen(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003487 void *buffer, size_t *lenp, loff_t *ppos)
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003488{
3489 int size, ret;
Joe Perchesfe2c6332013-06-11 23:04:25 -07003490 struct ctl_table tmp = *ctl;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003491
Matteo Croceeec48442019-07-18 15:58:50 -07003492 tmp.extra1 = SYSCTL_ZERO;
Shan Weice46cc62012-12-04 18:49:15 +00003493 tmp.extra2 = &unres_qlen_max;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003494 tmp.data = &size;
Shan Weice46cc62012-12-04 18:49:15 +00003495
3496 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3497 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3498
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003499 if (write && !ret)
3500 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3501 return ret;
3502}
3503
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003504static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3505 int family)
3506{
Jiri Pirkobba24892013-12-07 19:26:57 +01003507 switch (family) {
3508 case AF_INET:
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003509 return __in_dev_arp_parms_get_rcu(dev);
Jiri Pirkobba24892013-12-07 19:26:57 +01003510 case AF_INET6:
3511 return __in6_dev_nd_parms_get_rcu(dev);
3512 }
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003513 return NULL;
3514}
3515
3516static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3517 int index)
3518{
3519 struct net_device *dev;
3520 int family = neigh_parms_family(p);
3521
3522 rcu_read_lock();
3523 for_each_netdev_rcu(net, dev) {
3524 struct neigh_parms *dst_p =
3525 neigh_get_dev_parms_rcu(dev, family);
3526
3527 if (dst_p && !test_bit(index, dst_p->data_state))
3528 dst_p->data[index] = p->data[index];
3529 }
3530 rcu_read_unlock();
3531}
3532
3533static void neigh_proc_update(struct ctl_table *ctl, int write)
3534{
3535 struct net_device *dev = ctl->extra1;
3536 struct neigh_parms *p = ctl->extra2;
Jiri Pirko77d47af2013-12-10 23:55:07 +01003537 struct net *net = neigh_parms_net(p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003538 int index = (int *) ctl->data - p->data;
3539
3540 if (!write)
3541 return;
3542
3543 set_bit(index, p->data_state);
Marcus Huewe7627ae62017-02-15 01:00:36 +01003544 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3545 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003546 if (!dev) /* NULL dev means this is default value */
3547 neigh_copy_dflt_parms(net, p, index);
3548}
3549
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003550static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003551 void *buffer, size_t *lenp,
3552 loff_t *ppos)
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003553{
3554 struct ctl_table tmp = *ctl;
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003555 int ret;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003556
Matteo Croceeec48442019-07-18 15:58:50 -07003557 tmp.extra1 = SYSCTL_ZERO;
3558 tmp.extra2 = SYSCTL_INT_MAX;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003559
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003560 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3561 neigh_proc_update(ctl, write);
3562 return ret;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003563}
3564
Christoph Hellwig32927392020-04-24 08:43:38 +02003565int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3566 size_t *lenp, loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003567{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003568 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3569
3570 neigh_proc_update(ctl, write);
3571 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003572}
3573EXPORT_SYMBOL(neigh_proc_dointvec);
3574
Christoph Hellwig32927392020-04-24 08:43:38 +02003575int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003576 size_t *lenp, loff_t *ppos)
3577{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003578 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3579
3580 neigh_proc_update(ctl, write);
3581 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003582}
3583EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3584
3585static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003586 void *buffer, size_t *lenp,
3587 loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003588{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003589 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3590
3591 neigh_proc_update(ctl, write);
3592 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003593}
3594
3595int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003596 void *buffer, size_t *lenp, loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003597{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003598 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3599
3600 neigh_proc_update(ctl, write);
3601 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003602}
3603EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3604
3605static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003606 void *buffer, size_t *lenp,
3607 loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003608{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003609 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3610
3611 neigh_proc_update(ctl, write);
3612 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003613}
3614
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003615static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003616 void *buffer, size_t *lenp,
3617 loff_t *ppos)
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003618{
3619 struct neigh_parms *p = ctl->extra2;
3620 int ret;
3621
3622 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3623 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3624 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3625 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3626 else
3627 ret = -1;
3628
3629 if (write && ret == 0) {
3630 /* update reachable_time as well, otherwise, the change will
3631 * only be effective after the next time neigh_periodic_work
3632 * decides to recompute it
3633 */
3634 p->reachable_time =
3635 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3636 }
3637 return ret;
3638}
3639
/* Encode the offset of ->data[index] within struct neigh_parms as a
 * fake pointer (classic null-base offsetof idiom); neigh_sysctl_register()
 * later adds the live parms address ("data += (long) p") to obtain a
 * real pointer into that parms instance.
 */
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

/* Build one ctl_table entry for sysctl NEIGH_VAR_<attr>, backed by the
 * NEIGH_VAR_<data_attr> storage slot.  attr == data_attr for normal
 * entries; the *_REUSED_ENTRY wrappers below alias a legacy name onto
 * another variable's storage.
 */
#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

/* Plain integer clamped to [0, INT_MAX] */
#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

/* Value exposed in seconds, stored in jiffies */
#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

/* Value exposed in USER_HZ ticks, stored in jiffies */
#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

/* Millisecond alias sharing data_attr's storage */
#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

/* Legacy packet-count alias of the byte-based queue length */
#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
Eric W. Biederman54716e32010-02-14 03:27:03 +00003666
/* Template sysctl table cloned (kmemdup) per neigh_parms by
 * neigh_sysctl_register().  The per-parms entries come first; the
 * gc_* entries at the tail are table-global and are either pointed at
 * the table's fields (default parms) or zeroed out to terminate the
 * table early (per-device parms).
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		/* legacy aliases sharing another entry's storage */
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		/* table-global knobs; see neigh_sysctl_register() */
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};
3721
/* Register a sysctl directory net/<ipv4|ipv6>/neigh/<dev|default> for
 * the given neigh_parms.
 *
 * @dev:     device the parms belong to, or NULL for a table's default
 *           parms (default parms also expose the gc_* knobs).
 * @p:       the parms instance the entries will read/write.
 * @handler: optional proc_handler override for the four time knobs
 *           (retrans/base_reachable, plain and _ms forms).
 *
 * Returns 0 on success, -ENOBUFS on allocation/registration failure.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	/* work on a private copy of the template */
	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	/* turn the template's offsets into live pointers into *p
	 * (see NEIGH_PARMS_DATA_OFFSET), and stash dev/parms where
	 * neigh_proc_update() expects them
	 */
	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		/* default parms: wire the gc_* knobs to the table itself */
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
	      p_name = "ipv4";
	      break;
	case AF_INET6:
	      p_name = "ipv6";
	      break;
	default:
	      BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806
3807void neigh_sysctl_unregister(struct neigh_parms *p)
3808{
3809 if (p->sysctl_table) {
3810 struct neigh_sysctl_table *t = p->sysctl_table;
3811 p->sysctl_table = NULL;
Eric W. Biederman5dd3df12012-04-19 13:24:33 +00003812 unregister_net_sysctl_table(t->sysctl_header);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813 kfree(t);
3814 }
3815}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003816EXPORT_SYMBOL(neigh_sysctl_unregister);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817
3818#endif /* CONFIG_SYSCTL */
3819
/* Register the PF_UNSPEC rtnetlink handlers for neighbour entries
 * (NEW/DEL/GET) and neighbour tables (GETNEIGHTBL dump, SETNEIGHTBL).
 * Runs at subsys_initcall time, before protocol modules that rely on
 * these message types.
 */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);