// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be made under neigh->lock,
   the most complicated procedure, which we allow is dev->hard_header.
   It is supposed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
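
/*
 * A minimal illustration of the rule above (hypothetical helper, not part
 * of the original file): pin the entry with a reference while tbl->lock is
 * held, drop the table lock, and only then do anything that may call back
 * into drivers or the network stack.
 *
 *	static void example_poke_entry(struct neigh_table *tbl,
 *				       struct neighbour *n)
 *	{
 *		write_lock_bh(&tbl->lock);
 *		neigh_hold(n);			// pin while under tbl->lock
 *		write_unlock_bh(&tbl->lock);	// never call drivers locked
 *
 *		neigh_event_send(n, NULL);	// non-trivial work, lock-free
 *		neigh_release(n);		// drop the pinning reference
 *	}
 */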

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
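
/*
 * Example of the arithmetic above, assuming the default base_reachable_time
 * of 30 seconds: (prandom_u32() % base) lies in [0, base) and (base >> 1)
 * adds base/2, so the result is uniformly distributed over roughly
 * [15s, 45s), i.e. (1/2)*base ... (3/2)*base.
 */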

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->flags = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
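
/*
 * Caller-side sketch (hypothetical helper, for illustration only):
 * neigh_lookup() returns a referenced entry or NULL, so every successful
 * lookup must be paired with neigh_release(), and per the locking rules
 * above the entry state is read under neigh->lock.
 *
 *	static bool example_is_valid(struct neigh_table *tbl, const void *pkey,
 *				     struct net_device *dev)
 *	{
 *		struct neighbour *n = neigh_lookup(tbl, pkey, dev);
 *		bool valid = false;
 *
 *		if (n) {
 *			read_lock_bh(&n->lock);
 *			valid = !!(n->nud_state & NUD_VALID);
 *			read_unlock_bh(&n->lock);
 *			neigh_release(n);
 *		}
 *		return valid;
 *	}
 */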

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
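
/*
 * Example of the fold above for an IPv4 proxy entry (key_len == 4): the
 * last four bytes are the address itself, the successive xor-shifts fold
 * all 32 bits down into the low nibble, and masking with PNEIGH_HASHMASK
 * leaves a bucket index in the range 0..15.
 */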

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		dev_put_track(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			dev_put_track(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		dev_put_track(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put_track(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
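
/*
 * Example with the ARP defaults (ucast_solicit = 3, app_solicit = 0,
 * mcast_solicit = 3, mcast_resolicit = 0): resolving a new entry allows
 * 3 + 0 + 3 = 6 probes in total, while reprobing an entry in NUD_PROBE
 * allows only 3 + 0 + 0 = 3, since MCAST_REPROBES replaces MCAST_PROBES
 * once a link-layer address is already known.
 */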

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* It is very thin place. report_unreachable is very complicated
	   routine. Particularly, it can hit the same neighbour entry!

	   So that, we try to be accurate and avoid dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
David S. Millerf6b72b622011-07-14 07:53:20 -07001218static void neigh_update_hhs(struct neighbour *neigh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219{
1220 struct hh_cache *hh;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001221 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
Doug Kehn91a72a72010-07-14 18:02:16 -07001222 = NULL;
1223
1224 if (neigh->dev->header_ops)
1225 update = neigh->dev->header_ops->cache_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
1227 if (update) {
David S. Millerf6b72b622011-07-14 07:53:20 -07001228 hh = &neigh->hh;
Eric Dumazetc305c6ae2019-11-07 18:29:11 -08001229 if (READ_ONCE(hh->hh_len)) {
Stephen Hemminger3644f0c2006-12-07 15:08:17 -08001230 write_seqlock_bh(&hh->hh_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 update(hh, neigh->dev, neigh->ha);
Stephen Hemminger3644f0c2006-12-07 15:08:17 -08001232 write_sequnlock_bh(&hh->hh_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 }
1234 }
1235}
1236
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it,
				if it differs.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.
	NEIGH_UPDATE_F_USE means that the entry is user triggered.
	NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
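/* Illustrative sketch (added for exposition, not part of the original file):
 * a caller that has just learned a link-layer address from an incoming probe
 * typically marks the entry stale and lets outgoing traffic re-confirm it:
 *
 *	neigh_update(n, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE, 0);
 *
 * This is the same pattern neigh_event_ns() uses further down; the final
 * argument is the netlink pid carried in the change notification (0 for
 * kernel-originated updates).
 */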
David Ahern7a35a502018-12-05 20:02:29 -08001256static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1257 u8 new, u32 flags, u32 nlmsg_pid,
1258 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259{
Daniel Borkmann7482e382021-10-11 14:12:38 +02001260 bool gc_update = false, managed_update = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 int update_isrouter = 0;
Daniel Borkmann7482e382021-10-11 14:12:38 +02001262 struct net_device *dev;
1263 int err, notify = 0;
1264 u8 old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001266 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1267
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 write_lock_bh(&neigh->lock);
1269
1270 dev = neigh->dev;
1271 old = neigh->nud_state;
1272 err = -EPERM;
1273
Chinmay Agarwaleb4e8fa2021-01-27 22:24:54 +05301274 if (neigh->dead) {
1275 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1276 new = old;
1277 goto out;
1278 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001279 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 (old & (NUD_NOARP | NUD_PERMANENT)))
1281 goto out;
1282
Daniel Borkmann7482e382021-10-11 14:12:38 +02001283 neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1284 if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
Daniel Borkmann3dc20f42021-10-11 14:12:36 +02001285 new = old & ~NUD_PERMANENT;
1286 neigh->nud_state = new;
1287 err = 0;
1288 goto out;
1289 }
Roopa Prabhu9ce33e42018-04-24 13:49:34 -07001290
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 if (!(new & NUD_VALID)) {
1292 neigh_del_timer(neigh);
1293 if (old & NUD_CONNECTED)
1294 neigh_suspect(neigh);
David Ahern9c29a2f2018-12-11 18:57:21 -07001295 neigh->nud_state = new;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 notify = old & NUD_VALID;
Roopa Prabhud2fb4fb2018-10-20 18:09:31 -07001298 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
Timo Teras5ef12d92009-06-11 04:16:28 -07001299 (new & NUD_FAILED)) {
1300 neigh_invalidate(neigh);
1301 notify = 1;
1302 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 goto out;
1304 }
1305
1306 /* Compare new lladdr with cached one */
1307 if (!dev->addr_len) {
1308 /* First case: device needs no address. */
1309 lladdr = neigh->ha;
1310 } else if (lladdr) {
1311 /* The second case: if something is already cached
1312 and a new address is proposed:
1313 - compare new & old
1314 - if they are different, check override flag
1315 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001316 if ((old & NUD_VALID) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 !memcmp(lladdr, neigh->ha, dev->addr_len))
1318 lladdr = neigh->ha;
1319 } else {
1320 /* No address is supplied; if we know something,
1321 use it, otherwise discard the request.
1322 */
1323 err = -EINVAL;
David Ahern7a35a502018-12-05 20:02:29 -08001324 if (!(old & NUD_VALID)) {
1325 NL_SET_ERR_MSG(extack, "No link layer address given");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 lladdr = neigh->ha;
1329 }
1330
	/* Update the confirmed timestamp for the neighbour entry after we
	 * receive an ARP packet, even if it doesn't change the IP-to-MAC
	 * binding.
	 */
1334 if (new & NUD_CONNECTED)
1335 neigh->confirmed = jiffies;
1336
	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
1340 err = 0;
1341 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1342 if (old & NUD_VALID) {
1343 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1344 update_isrouter = 0;
1345 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1346 (old & NUD_CONNECTED)) {
1347 lladdr = neigh->ha;
1348 new = NUD_STALE;
1349 } else
1350 goto out;
1351 } else {
Julian Anastasov0e7bbcc2016-07-27 09:56:50 +03001352 if (lladdr == neigh->ha && new == NUD_STALE &&
1353 !(flags & NEIGH_UPDATE_F_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 new = old;
1355 }
1356 }
1357
	/* Update the timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk moving the locktime window with
	 * no-op updates and ignoring relevant ARP updates.
	 */
Vasily Khoruzhickf0e0d042018-09-13 11:12:03 -07001362 if (new != old || lladdr != neigh->ha)
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001363 neigh->updated = jiffies;
Ihar Hrachyshka77d71232017-05-16 08:44:24 -07001364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 if (new != old) {
1366 neigh_del_timer(neigh);
Erik Kline765c9c62015-05-18 19:44:41 +09001367 if (new & NUD_PROBE)
1368 atomic_set(&neigh->probes, 0);
Pavel Emelyanova43d8992007-12-20 15:49:05 -08001369 if (new & NUD_IN_TIMER)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001370 neigh_add_timer(neigh, (jiffies +
1371 ((new & NUD_REACHABLE) ?
David S. Miller667347f2005-09-27 12:07:44 -07001372 neigh->parms->reachable_time :
1373 0)));
David Ahern9c29a2f2018-12-11 18:57:21 -07001374 neigh->nud_state = new;
Bob Gilligan53385d22013-12-15 13:39:56 -08001375 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 }
1377
1378 if (lladdr != neigh->ha) {
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001379 write_seqlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 memcpy(&neigh->ha, lladdr, dev->addr_len);
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001381 write_sequnlock(&neigh->ha_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 neigh_update_hhs(neigh);
1383 if (!(new & NUD_CONNECTED))
1384 neigh->confirmed = jiffies -
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001385 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 notify = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 }
1388 if (new == old)
1389 goto out;
1390 if (new & NUD_CONNECTED)
1391 neigh_connect(neigh);
1392 else
1393 neigh_suspect(neigh);
1394 if (!(old & NUD_VALID)) {
1395 struct sk_buff *skb;
1396
		/* Again: avoid an endless loop if something went wrong */
1398
1399 while (neigh->nud_state & NUD_VALID &&
1400 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
David S. Miller69cce1d2011-07-17 23:09:49 -07001401 struct dst_entry *dst = skb_dst(skb);
1402 struct neighbour *n2, *n1 = neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 write_unlock_bh(&neigh->lock);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001404
1405 rcu_read_lock();
David S. Miller13a43d92012-07-02 22:15:37 -07001406
1407 /* Why not just use 'neigh' as-is? The problem is that
1408 * things such as shaper, eql, and sch_teql can end up
1409 * using alternative, different, neigh objects to output
1410 * the packet in the output path. So what we need to do
1411 * here is re-lookup the top-level neigh in the path so
1412 * we can reinject the packet there.
1413 */
1414 n2 = NULL;
Tong Zhud47ec7a2021-03-19 14:33:37 -04001415 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
David S. Miller13a43d92012-07-02 22:15:37 -07001416 n2 = dst_neigh_lookup_skb(dst, skb);
1417 if (n2)
1418 n1 = n2;
1419 }
David S. Miller8f40b162011-07-17 13:34:11 -07001420 n1->output(n1, skb);
David S. Miller13a43d92012-07-02 22:15:37 -07001421 if (n2)
1422 neigh_release(n2);
roy.qing.li@gmail.come049f282011-10-17 22:32:42 +00001423 rcu_read_unlock();
1424
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 write_lock_bh(&neigh->lock);
1426 }
Eric Dumazetc9ab4d82013-06-28 02:37:42 -07001427 __skb_queue_purge(&neigh->arp_queue);
Eric Dumazet8b5c1712011-11-09 12:07:14 +00001428 neigh->arp_queue_len_bytes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 }
1430out:
Roopa Prabhufc6e8072018-09-22 21:26:20 -07001431 if (update_isrouter)
1432 neigh_update_is_router(neigh, flags, &notify);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 write_unlock_bh(&neigh->lock);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001434 if (((new ^ old) & NUD_PERMANENT) || gc_update)
David Ahern9c29a2f2018-12-11 18:57:21 -07001435 neigh_update_gc_list(neigh);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001436 if (managed_update)
1437 neigh_update_managed_list(neigh);
Tom Tucker8d717402006-07-30 20:43:36 -07001438 if (notify)
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001439 neigh_update_notify(neigh, nlmsg_pid);
Roopa Prabhu56dd18a2019-02-14 09:15:11 -08001440 trace_neigh_update_done(neigh, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 return err;
1442}
David Ahern7a35a502018-12-05 20:02:29 -08001443
1444int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1445 u32 flags, u32 nlmsg_pid)
1446{
1447 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1448}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001449EXPORT_SYMBOL(neigh_update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
Jiri Benc7e980562013-12-11 13:48:20 +01001451/* Update the neigh to listen temporarily for probe responses, even if it is
1452 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1453 */
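/* Illustrative sketch (added for exposition, not part of the original file):
 * a caller honours the locking requirement above with something like
 *
 *	write_lock_bh(&neigh->lock);
 *	__neigh_set_probe_once(neigh);
 *	write_unlock_bh(&neigh->lock);
 */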
1454void __neigh_set_probe_once(struct neighbour *neigh)
1455{
Julian Anastasov2c51a972015-06-16 22:56:39 +03001456 if (neigh->dead)
1457 return;
Jiri Benc7e980562013-12-11 13:48:20 +01001458 neigh->updated = jiffies;
1459 if (!(neigh->nud_state & NUD_FAILED))
1460 return;
Duan Jiong2176d5d2014-05-09 13:16:48 +08001461 neigh->nud_state = NUD_INCOMPLETE;
1462 atomic_set(&neigh->probes, neigh_max_probes(neigh));
Jiri Benc7e980562013-12-11 13:48:20 +01001463 neigh_add_timer(neigh,
Hangbin Liu19e16d22020-04-01 14:46:20 +08001464 jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1465 HZ/100));
Jiri Benc7e980562013-12-11 13:48:20 +01001466}
1467EXPORT_SYMBOL(__neigh_set_probe_once);
1468
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1470 u8 *lladdr, void *saddr,
1471 struct net_device *dev)
1472{
1473 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1474 lladdr || !dev->addr_len);
1475 if (neigh)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001476 neigh_update(neigh, lladdr, NUD_STALE,
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07001477 NEIGH_UPDATE_F_OVERRIDE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 return neigh;
1479}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001480EXPORT_SYMBOL(neigh_event_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
/* Initializes n->hh; serializes against concurrent callers via n->lock,
 * which is taken below.
 */
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001483static void neigh_hh_init(struct neighbour *n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484{
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001485 struct net_device *dev = n->dev;
1486 __be16 prot = n->tbl->protocol;
David S. Millerf6b72b622011-07-14 07:53:20 -07001487 struct hh_cache *hh = &n->hh;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001488
1489 write_lock_bh(&n->lock);
Eric Dumazet34d101d2010-10-11 09:16:57 -07001490
David S. Millerf6b72b622011-07-14 07:53:20 -07001491 /* Only one thread can come in here and initialize the
1492 * hh_cache entry.
1493 */
David S. Millerb23b5452011-07-16 17:45:02 -07001494 if (!hh->hh_len)
1495 dev->header_ops->cache(n, hh, prot);
David S. Millerf6b72b622011-07-14 07:53:20 -07001496
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001497 write_unlock_bh(&n->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498}
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500/* Slow and careful. */
1501
David S. Miller8f40b162011-07-17 13:34:11 -07001502int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 int rc = 0;
1505
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 if (!neigh_event_send(neigh, skb)) {
1507 int err;
1508 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001509 unsigned int seq;
Eric Dumazet34d101d2010-10-11 09:16:57 -07001510
Eric Dumazetc305c6ae2019-11-07 18:29:11 -08001511 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
Eric W. Biedermanbdf53c52015-03-02 00:13:22 -06001512 neigh_hh_init(neigh);
Eric Dumazet34d101d2010-10-11 09:16:57 -07001513
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001514 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001515 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001516 seq = read_seqbegin(&neigh->ha_lock);
1517 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1518 neigh->ha, NULL, skb->len);
1519 } while (read_seqretry(&neigh->ha_lock, seq));
Eric Dumazet34d101d2010-10-11 09:16:57 -07001520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001522 rc = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 else
1524 goto out_kfree_skb;
1525 }
1526out:
1527 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528out_kfree_skb:
1529 rc = -EINVAL;
1530 kfree_skb(skb);
1531 goto out;
1532}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001533EXPORT_SYMBOL(neigh_resolve_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
1535/* As fast as possible without hh cache */
1536
David S. Miller8f40b162011-07-17 13:34:11 -07001537int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 struct net_device *dev = neigh->dev;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001540 unsigned int seq;
David S. Miller8f40b162011-07-17 13:34:11 -07001541 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001543 do {
ramesh.nagappa@gmail.come1f16502012-10-05 19:10:15 +00001544 __skb_pull(skb, skb_network_offset(skb));
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00001545 seq = read_seqbegin(&neigh->ha_lock);
1546 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1547 neigh->ha, NULL, skb->len);
1548 } while (read_seqretry(&neigh->ha_lock, seq));
1549
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 if (err >= 0)
David S. Miller542d4d62011-07-16 18:06:24 -07001551 err = dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 else {
1553 err = -EINVAL;
1554 kfree_skb(skb);
1555 }
1556 return err;
1557}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001558EXPORT_SYMBOL(neigh_connected_output);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
David S. Miller8f40b162011-07-17 13:34:11 -07001560int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1561{
1562 return dev_queue_xmit(skb);
1563}
1564EXPORT_SYMBOL(neigh_direct_output);
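/* Illustrative note (added for exposition, not part of the original file):
 * neigh_resolve_output(), neigh_connected_output() and neigh_direct_output()
 * are the usual targets of neigh->output.  neigh_connect()/neigh_suspect()
 * flip an entry between the connected and slow paths as its NUD state
 * changes, so a transmit path only needs something like
 *
 *	struct neighbour *n = dst_neigh_lookup_skb(dst, skb);
 *
 *	if (n) {
 *		n->output(n, skb);
 *		neigh_release(n);
 *	}
 *
 * and the state machine picks the cheapest output function that is safe.
 */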
1565
Daniel Borkmann7482e382021-10-11 14:12:38 +02001566static void neigh_managed_work(struct work_struct *work)
1567{
1568 struct neigh_table *tbl = container_of(work, struct neigh_table,
1569 managed_work.work);
1570 struct neighbour *neigh;
1571
1572 write_lock_bh(&tbl->lock);
1573 list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1574 neigh_event_send(neigh, NULL);
1575 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1576 NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
1577 write_unlock_bh(&tbl->lock);
1578}
1579
Kees Cooke99e88a2017-10-16 14:43:17 -07001580static void neigh_proxy_process(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581{
Kees Cooke99e88a2017-10-16 14:43:17 -07001582 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 long sched_next = 0;
1584 unsigned long now = jiffies;
David S. Millerf72051b2008-09-23 01:11:18 -07001585 struct sk_buff *skb, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
1587 spin_lock(&tbl->proxy_queue.lock);
1588
David S. Millerf72051b2008-09-23 01:11:18 -07001589 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1590 long tdif = NEIGH_CB(skb)->sched_next - now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 if (tdif <= 0) {
David S. Millerf72051b2008-09-23 01:11:18 -07001593 struct net_device *dev = skb->dev;
Eric Dumazet20e60742011-08-22 19:32:42 +00001594
David S. Millerf72051b2008-09-23 01:11:18 -07001595 __skb_unlink(skb, &tbl->proxy_queue);
Eric Dumazet20e60742011-08-22 19:32:42 +00001596 if (tbl->proxy_redo && netif_running(dev)) {
1597 rcu_read_lock();
David S. Millerf72051b2008-09-23 01:11:18 -07001598 tbl->proxy_redo(skb);
Eric Dumazet20e60742011-08-22 19:32:42 +00001599 rcu_read_unlock();
1600 } else {
David S. Millerf72051b2008-09-23 01:11:18 -07001601 kfree_skb(skb);
Eric Dumazet20e60742011-08-22 19:32:42 +00001602 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603
1604 dev_put(dev);
1605 } else if (!sched_next || tdif < sched_next)
1606 sched_next = tdif;
1607 }
1608 del_timer(&tbl->proxy_timer);
1609 if (sched_next)
1610 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1611 spin_unlock(&tbl->proxy_queue.lock);
1612}
1613
1614void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1615 struct sk_buff *skb)
1616{
weichenchena533b702020-12-25 13:44:45 +08001617 unsigned long sched_next = jiffies +
1618 prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001620 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 kfree_skb(skb);
1622 return;
1623 }
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001624
1625 NEIGH_CB(skb)->sched_next = sched_next;
1626 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627
1628 spin_lock(&tbl->proxy_queue.lock);
1629 if (del_timer(&tbl->proxy_timer)) {
1630 if (time_before(tbl->proxy_timer.expires, sched_next))
1631 sched_next = tbl->proxy_timer.expires;
1632 }
Eric Dumazetadf30902009-06-02 05:19:30 +00001633 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 dev_hold(skb->dev);
1635 __skb_queue_tail(&tbl->proxy_queue, skb);
1636 mod_timer(&tbl->proxy_timer, sched_next);
1637 spin_unlock(&tbl->proxy_queue.lock);
1638}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001639EXPORT_SYMBOL(pneigh_enqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07001641static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
Eric W. Biederman426b5302008-01-24 00:13:18 -08001642 struct net *net, int ifindex)
1643{
1644 struct neigh_parms *p;
1645
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001646 list_for_each_entry(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09001647 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
Gao feng170d6f92013-06-20 10:01:33 +08001648 (!p->dev && !ifindex && net_eq(net, &init_net)))
Eric W. Biederman426b5302008-01-24 00:13:18 -08001649 return p;
1650 }
1651
1652 return NULL;
1653}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
1655struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1656 struct neigh_table *tbl)
1657{
Gao fengcf89d6b2013-06-20 10:01:32 +08001658 struct neigh_parms *p;
Stephen Hemminger00829822008-11-20 20:14:53 -08001659 struct net *net = dev_net(dev);
1660 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
Gao fengcf89d6b2013-06-20 10:01:32 +08001662 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 if (p) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 p->tbl = tbl;
Reshetova, Elena63439442017-06-30 13:07:56 +03001665 refcount_set(&p->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 p->reachable_time =
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001667 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Eric Dumazet08d62252021-12-04 20:22:09 -08001668 dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
Denis V. Lunev486b51d2008-01-14 22:59:59 -08001669 p->dev = dev;
Eric W. Biedermanefd7ef12015-03-11 23:04:08 -05001670 write_pnet(&p->net, net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 p->sysctl_table = NULL;
Veaceslav Falico63134802013-08-02 19:07:38 +02001672
1673 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
Eric Dumazet08d62252021-12-04 20:22:09 -08001674 dev_put_track(dev, &p->dev_tracker);
Veaceslav Falico63134802013-08-02 19:07:38 +02001675 kfree(p);
1676 return NULL;
1677 }
1678
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 write_lock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001680 list_add(&p->list, &tbl->parms.list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 write_unlock_bh(&tbl->lock);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01001682
1683 neigh_parms_data_state_cleanall(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 }
1685 return p;
1686}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001687EXPORT_SYMBOL(neigh_parms_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688
1689static void neigh_rcu_free_parms(struct rcu_head *head)
1690{
1691 struct neigh_parms *parms =
1692 container_of(head, struct neigh_parms, rcu_head);
1693
1694 neigh_parms_put(parms);
1695}
1696
1697void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1698{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 if (!parms || parms == &tbl->parms)
1700 return;
1701 write_lock_bh(&tbl->lock);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001702 list_del(&parms->list);
1703 parms->dead = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 write_unlock_bh(&tbl->lock);
Eric Dumazet08d62252021-12-04 20:22:09 -08001705 dev_put_track(parms->dev, &parms->dev_tracker);
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001706 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001708EXPORT_SYMBOL(neigh_parms_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Denis V. Lunev06f05112008-01-24 00:30:58 -08001710static void neigh_parms_destroy(struct neigh_parms *parms)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711{
1712 kfree(parms);
1713}
1714
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001715static struct lock_class_key neigh_table_proxy_queue_class;
1716
WANG Congd7480fd2014-11-10 15:59:36 -08001717static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1718
1719void neigh_table_init(int index, struct neigh_table *tbl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720{
1721 unsigned long now = jiffies;
1722 unsigned long phsize;
1723
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001724 INIT_LIST_HEAD(&tbl->parms_list);
David Ahern58956312018-12-07 12:24:57 -08001725 INIT_LIST_HEAD(&tbl->gc_list);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001726 INIT_LIST_HEAD(&tbl->managed_list);
1727
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01001728 list_add(&tbl->parms.list, &tbl->parms_list);
Eric Dumazete42ea982008-11-12 00:54:54 -08001729 write_pnet(&tbl->parms.net, &init_net);
Reshetova, Elena63439442017-06-30 13:07:56 +03001730 refcount_set(&tbl->parms.refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 tbl->parms.reachable_time =
Jiri Pirko1f9248e2013-12-07 19:26:53 +01001732 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 tbl->stats = alloc_percpu(struct neigh_statistics);
1735 if (!tbl->stats)
1736 panic("cannot create neighbour cache statistics");
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738#ifdef CONFIG_PROC_FS
Christoph Hellwig71a50532018-04-15 10:16:41 +02001739 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1740 &neigh_stat_seq_ops, tbl))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 panic("cannot create neighbour proc dir entry");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742#endif
1743
David S. Millercd089332011-07-11 01:28:12 -07001744 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
1746 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
Andrew Morton77d04bd2006-04-07 14:52:59 -07001747 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001749 if (!tbl->nht || !tbl->phash_buckets)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 panic("cannot allocate neighbour cache hashes");
1751
YOSHIFUJI Hideaki / 吉藤英明08433ef2013-01-24 00:44:23 +00001752 if (!tbl->entry_size)
1753 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1754 tbl->key_len, NEIGH_PRIV_ALIGN);
1755 else
1756 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 rwlock_init(&tbl->lock);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001759
Tejun Heo203b42f2012-08-21 13:18:23 -07001760 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
viresh kumarf6180022014-01-22 12:23:33 +05301761 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1762 tbl->parms.reachable_time);
Daniel Borkmann7482e382021-10-11 14:12:38 +02001763 INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1764 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1765
Kees Cooke99e88a2017-10-16 14:43:17 -07001766 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
Pavel Emelianovc2ecba72007-04-17 12:45:31 -07001767 skb_queue_head_init_class(&tbl->proxy_queue,
1768 &neigh_table_proxy_queue_class);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
1770 tbl->last_flush = now;
1771 tbl->last_rand = now + tbl->parms.reachable_time * 20;
Simon Kelleybd89efc2006-05-12 14:56:08 -07001772
WANG Congd7480fd2014-11-10 15:59:36 -08001773 neigh_tables[index] = tbl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001775EXPORT_SYMBOL(neigh_table_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
WANG Congd7480fd2014-11-10 15:59:36 -08001777int neigh_table_clear(int index, struct neigh_table *tbl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778{
WANG Congd7480fd2014-11-10 15:59:36 -08001779 neigh_tables[index] = NULL;
	/* It is not clean... Fix this so the IPv6 module can be unloaded safely */
Daniel Borkmann4177d5b2021-11-22 16:01:51 +01001781 cancel_delayed_work_sync(&tbl->managed_work);
Tejun Heoa5c30b32010-10-19 06:04:42 +00001782 cancel_delayed_work_sync(&tbl->gc_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 del_timer_sync(&tbl->proxy_timer);
1784 pneigh_queue_purge(&tbl->proxy_queue);
1785 neigh_ifdown(tbl, NULL);
1786 if (atomic_read(&tbl->entries))
Joe Perchese005d192012-05-16 19:58:40 +00001787 pr_crit("neighbour leakage\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Eric Dumazet6193d2b2011-01-19 22:02:47 +00001789 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1790 neigh_hash_free_rcu);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00001791 tbl->nht = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
1793 kfree(tbl->phash_buckets);
1794 tbl->phash_buckets = NULL;
1795
Alexey Dobriyan3f192b52007-11-05 21:28:13 -08001796 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1797
Kirill Korotaev3fcde742006-09-01 01:34:10 -07001798 free_percpu(tbl->stats);
1799 tbl->stats = NULL;
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 return 0;
1802}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09001803EXPORT_SYMBOL(neigh_table_clear);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
WANG Congd7480fd2014-11-10 15:59:36 -08001805static struct neigh_table *neigh_find_table(int family)
1806{
1807 struct neigh_table *tbl = NULL;
1808
1809 switch (family) {
1810 case AF_INET:
1811 tbl = neigh_tables[NEIGH_ARP_TABLE];
1812 break;
1813 case AF_INET6:
1814 tbl = neigh_tables[NEIGH_ND_TABLE];
1815 break;
1816 case AF_DECnet:
1817 tbl = neigh_tables[NEIGH_DN_TABLE];
1818 break;
1819 }
1820
1821 return tbl;
1822}
1823
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08001824const struct nla_policy nda_policy[NDA_MAX+1] = {
Roopa Prabhu1274e1c2020-05-21 22:26:14 -07001825 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID },
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08001826 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1827 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1828 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
1829 [NDA_PROBES] = { .type = NLA_U32 },
1830 [NDA_VLAN] = { .type = NLA_U16 },
1831 [NDA_PORT] = { .type = NLA_U16 },
1832 [NDA_VNI] = { .type = NLA_U32 },
1833 [NDA_IFINDEX] = { .type = NLA_U32 },
1834 [NDA_MASTER] = { .type = NLA_U32 },
David Aherna9cd3432018-12-19 20:02:36 -08001835 [NDA_PROTOCOL] = { .type = NLA_U8 },
Roopa Prabhu1274e1c2020-05-21 22:26:14 -07001836 [NDA_NH_ID] = { .type = NLA_U32 },
Daniel Borkmannc8e80c12021-10-13 15:21:39 +02001837 [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
Nikolay Aleksandrov899426b2020-06-23 23:47:16 +03001838 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED },
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08001839};
1840
David Ahernc21ef3e2017-04-16 09:48:24 -07001841static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1842 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001844 struct net *net = sock_net(skb->sk);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001845 struct ndmsg *ndm;
1846 struct nlattr *dst_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 struct neigh_table *tbl;
WANG Congd7480fd2014-11-10 15:59:36 -08001848 struct neighbour *neigh;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 struct net_device *dev = NULL;
Thomas Grafa14a49d2006-08-07 17:53:08 -07001850 int err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
Eric Dumazet110b2492010-10-04 04:27:36 +00001852 ASSERT_RTNL();
Thomas Grafa14a49d2006-08-07 17:53:08 -07001853 if (nlmsg_len(nlh) < sizeof(*ndm))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 goto out;
1855
Thomas Grafa14a49d2006-08-07 17:53:08 -07001856 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
David Ahern7a35a502018-12-05 20:02:29 -08001857 if (!dst_attr) {
1858 NL_SET_ERR_MSG(extack, "Network address not specified");
Thomas Grafa14a49d2006-08-07 17:53:08 -07001859 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001860 }
Thomas Grafa14a49d2006-08-07 17:53:08 -07001861
1862 ndm = nlmsg_data(nlh);
1863 if (ndm->ndm_ifindex) {
Eric Dumazet110b2492010-10-04 04:27:36 +00001864 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001865 if (dev == NULL) {
1866 err = -ENODEV;
1867 goto out;
1868 }
1869 }
1870
WANG Congd7480fd2014-11-10 15:59:36 -08001871 tbl = neigh_find_table(ndm->ndm_family);
1872 if (tbl == NULL)
1873 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874
David Ahern7a35a502018-12-05 20:02:29 -08001875 if (nla_len(dst_attr) < (int)tbl->key_len) {
1876 NL_SET_ERR_MSG(extack, "Invalid network address");
WANG Congd7480fd2014-11-10 15:59:36 -08001877 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001878 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879
WANG Congd7480fd2014-11-10 15:59:36 -08001880 if (ndm->ndm_flags & NTF_PROXY) {
1881 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
Eric Dumazet110b2492010-10-04 04:27:36 +00001882 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 }
WANG Congd7480fd2014-11-10 15:59:36 -08001884
1885 if (dev == NULL)
1886 goto out;
1887
1888 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1889 if (neigh == NULL) {
1890 err = -ENOENT;
1891 goto out;
1892 }
1893
David Ahern7a35a502018-12-05 20:02:29 -08001894 err = __neigh_update(neigh, NULL, NUD_FAILED,
1895 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1896 NETLINK_CB(skb).portid, extack);
Sowmini Varadhan50710342017-06-02 09:01:49 -07001897 write_lock_bh(&tbl->lock);
WANG Congd7480fd2014-11-10 15:59:36 -08001898 neigh_release(neigh);
Sowmini Varadhan50710342017-06-02 09:01:49 -07001899 neigh_remove_one(neigh, tbl);
1900 write_unlock_bh(&tbl->lock);
Thomas Grafa14a49d2006-08-07 17:53:08 -07001901
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902out:
1903 return err;
1904}
1905
David Ahernc21ef3e2017-04-16 09:48:24 -07001906static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1907 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908{
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07001909 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02001910 NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001911 struct net *net = sock_net(skb->sk);
Thomas Graf5208deb2006-08-07 17:55:40 -07001912 struct ndmsg *ndm;
1913 struct nlattr *tb[NDA_MAX+1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 struct neigh_table *tbl;
1915 struct net_device *dev = NULL;
WANG Congd7480fd2014-11-10 15:59:36 -08001916 struct neighbour *neigh;
1917 void *dst, *lladdr;
David Aherndf9b0e32018-12-15 14:09:06 -08001918 u8 protocol = 0;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02001919 u32 ndm_flags;
Thomas Graf5208deb2006-08-07 17:55:40 -07001920 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
Eric Dumazet110b2492010-10-04 04:27:36 +00001922 ASSERT_RTNL();
Johannes Berg8cb08172019-04-26 14:07:28 +02001923 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1924 nda_policy, extack);
Thomas Graf5208deb2006-08-07 17:55:40 -07001925 if (err < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 goto out;
1927
Thomas Graf5208deb2006-08-07 17:55:40 -07001928 err = -EINVAL;
David Ahern7a35a502018-12-05 20:02:29 -08001929 if (!tb[NDA_DST]) {
1930 NL_SET_ERR_MSG(extack, "Network address not specified");
Thomas Graf5208deb2006-08-07 17:55:40 -07001931 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001932 }
Thomas Graf5208deb2006-08-07 17:55:40 -07001933
1934 ndm = nlmsg_data(nlh);
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02001935 ndm_flags = ndm->ndm_flags;
1936 if (tb[NDA_FLAGS_EXT]) {
1937 u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1938
Daniel Borkmann507c2f12021-10-13 15:21:38 +02001939 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
1940 (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
1941 hweight32(NTF_EXT_MASK)));
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02001942 ndm_flags |= (ext << NTF_EXT_SHIFT);
1943 }
Thomas Graf5208deb2006-08-07 17:55:40 -07001944 if (ndm->ndm_ifindex) {
Eric Dumazet110b2492010-10-04 04:27:36 +00001945 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
Thomas Graf5208deb2006-08-07 17:55:40 -07001946 if (dev == NULL) {
1947 err = -ENODEV;
1948 goto out;
1949 }
1950
David Ahern7a35a502018-12-05 20:02:29 -08001951 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1952 NL_SET_ERR_MSG(extack, "Invalid link address");
Eric Dumazet110b2492010-10-04 04:27:36 +00001953 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001954 }
Thomas Graf5208deb2006-08-07 17:55:40 -07001955 }
1956
WANG Congd7480fd2014-11-10 15:59:36 -08001957 tbl = neigh_find_table(ndm->ndm_family);
1958 if (tbl == NULL)
1959 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
David Ahern7a35a502018-12-05 20:02:29 -08001961 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1962 NL_SET_ERR_MSG(extack, "Invalid network address");
WANG Congd7480fd2014-11-10 15:59:36 -08001963 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001964 }
1965
WANG Congd7480fd2014-11-10 15:59:36 -08001966 dst = nla_data(tb[NDA_DST]);
1967 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
David Aherna9cd3432018-12-19 20:02:36 -08001969 if (tb[NDA_PROTOCOL])
David Aherndf9b0e32018-12-15 14:09:06 -08001970 protocol = nla_get_u8(tb[NDA_PROTOCOL]);
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02001971 if (ndm_flags & NTF_PROXY) {
WANG Congd7480fd2014-11-10 15:59:36 -08001972 struct pneigh_entry *pn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
Daniel Borkmann7482e382021-10-11 14:12:38 +02001974 if (ndm_flags & NTF_MANAGED) {
1975 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
1976 goto out;
1977 }
1978
WANG Congd7480fd2014-11-10 15:59:36 -08001979 err = -ENOBUFS;
1980 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1981 if (pn) {
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02001982 pn->flags = ndm_flags;
David Aherndf9b0e32018-12-15 14:09:06 -08001983 if (protocol)
1984 pn->protocol = protocol;
Eric Biederman0c5c2d32009-03-04 00:03:08 -08001985 err = 0;
WANG Congd7480fd2014-11-10 15:59:36 -08001986 }
Eric Dumazet110b2492010-10-04 04:27:36 +00001987 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 }
1989
David Ahern7a35a502018-12-05 20:02:29 -08001990 if (!dev) {
1991 NL_SET_ERR_MSG(extack, "Device not specified");
WANG Congd7480fd2014-11-10 15:59:36 -08001992 goto out;
David Ahern7a35a502018-12-05 20:02:29 -08001993 }
WANG Congd7480fd2014-11-10 15:59:36 -08001994
David Ahernb8fb1ab2019-04-16 17:31:43 -07001995 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1996 err = -EINVAL;
1997 goto out;
1998 }
1999
WANG Congd7480fd2014-11-10 15:59:36 -08002000 neigh = neigh_lookup(tbl, dst, dev);
2001 if (neigh == NULL) {
Daniel Borkmann30fc7ef2021-10-13 15:21:40 +02002002 bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
2003 bool exempt_from_gc = ndm_permanent ||
2004 ndm_flags & NTF_EXT_LEARNED;
David Aherne997f8a2018-12-11 18:57:25 -07002005
WANG Congd7480fd2014-11-10 15:59:36 -08002006 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2007 err = -ENOENT;
2008 goto out;
2009 }
Daniel Borkmann30fc7ef2021-10-13 15:21:40 +02002010 if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2011 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2012 err = -EINVAL;
2013 goto out;
2014 }
WANG Congd7480fd2014-11-10 15:59:36 -08002015
Daniel Borkmanne4400bb2021-10-11 14:12:35 +02002016 neigh = ___neigh_create(tbl, dst, dev,
Daniel Borkmann7482e382021-10-11 14:12:38 +02002017 ndm_flags &
2018 (NTF_EXT_LEARNED | NTF_MANAGED),
Daniel Borkmanne4400bb2021-10-11 14:12:35 +02002019 exempt_from_gc, true);
WANG Congd7480fd2014-11-10 15:59:36 -08002020 if (IS_ERR(neigh)) {
2021 err = PTR_ERR(neigh);
2022 goto out;
2023 }
2024 } else {
2025 if (nlh->nlmsg_flags & NLM_F_EXCL) {
2026 err = -EEXIST;
2027 neigh_release(neigh);
2028 goto out;
2029 }
2030
2031 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07002032 flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2033 NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
WANG Congd7480fd2014-11-10 15:59:36 -08002034 }
2035
Roman Mashak38212bb2020-05-01 21:34:18 -04002036 if (protocol)
2037 neigh->protocol = protocol;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002038 if (ndm_flags & NTF_EXT_LEARNED)
Roopa Prabhu9ce33e42018-04-24 13:49:34 -07002039 flags |= NEIGH_UPDATE_F_EXT_LEARNED;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002040 if (ndm_flags & NTF_ROUTER)
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07002041 flags |= NEIGH_UPDATE_F_ISROUTER;
Daniel Borkmann7482e382021-10-11 14:12:38 +02002042 if (ndm_flags & NTF_MANAGED)
2043 flags |= NEIGH_UPDATE_F_MANAGED;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002044 if (ndm_flags & NTF_USE)
Daniel Borkmann3dc20f42021-10-11 14:12:36 +02002045 flags |= NEIGH_UPDATE_F_USE;
Roopa Prabhuf7aa74e2018-09-22 21:26:19 -07002046
Daniel Borkmann3dc20f42021-10-11 14:12:36 +02002047 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2048 NETLINK_CB(skb).portid, extack);
Daniel Borkmann7482e382021-10-11 14:12:38 +02002049 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
WANG Congd7480fd2014-11-10 15:59:36 -08002050 neigh_event_send(neigh, NULL);
2051 err = 0;
Daniel Borkmann3dc20f42021-10-11 14:12:36 +02002052 }
WANG Congd7480fd2014-11-10 15:59:36 -08002053 neigh_release(neigh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054out:
2055 return err;
2056}
2057
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002058static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2059{
Thomas Grafca860fb2006-08-07 18:00:18 -07002060 struct nlattr *nest;
2061
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002062 nest = nla_nest_start_noflag(skb, NDTA_PARMS);
Thomas Grafca860fb2006-08-07 18:00:18 -07002063 if (nest == NULL)
2064 return -ENOBUFS;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002065
David S. Miller9a6308d2012-04-01 20:06:28 -04002066 if ((parms->dev &&
2067 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
Reshetova, Elena63439442017-06-30 13:07:56 +03002068 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002069 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2070 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2072 nla_put_u32(skb, NDTPA_QUEUE_LEN,
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002073 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2074 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2075 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2076 nla_put_u32(skb, NDTPA_UCAST_PROBES,
2077 NEIGH_VAR(parms, UCAST_PROBES)) ||
2078 nla_put_u32(skb, NDTPA_MCAST_PROBES,
2079 NEIGH_VAR(parms, MCAST_PROBES)) ||
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002080 nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2081 NEIGH_VAR(parms, MCAST_REPROBES)) ||
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002082 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2083 NDTPA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04002084 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002085 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002086 nla_put_msecs(skb, NDTPA_GC_STALETIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002087 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04002088 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002089 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002090 nla_put_msecs(skb, NDTPA_RETRANS_TIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002091 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002092 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002093 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002094 nla_put_msecs(skb, NDTPA_PROXY_DELAY,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002095 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002096 nla_put_msecs(skb, NDTPA_LOCKTIME,
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002097 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
David S. Miller9a6308d2012-04-01 20:06:28 -04002098 goto nla_put_failure;
Thomas Grafca860fb2006-08-07 18:00:18 -07002099 return nla_nest_end(skb, nest);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002100
Thomas Grafca860fb2006-08-07 18:00:18 -07002101nla_put_failure:
Thomas Grafbc3ed282008-06-03 16:36:54 -07002102 nla_nest_cancel(skb, nest);
2103 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002104}
2105
Thomas Grafca860fb2006-08-07 18:00:18 -07002106static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2107 u32 pid, u32 seq, int type, int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002108{
2109 struct nlmsghdr *nlh;
2110 struct ndtmsg *ndtmsg;
2111
Thomas Grafca860fb2006-08-07 18:00:18 -07002112 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2113 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002114 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002115
Thomas Grafca860fb2006-08-07 18:00:18 -07002116 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002117
2118 read_lock_bh(&tbl->lock);
2119 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002120 ndtmsg->ndtm_pad1 = 0;
2121 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002122
David S. Miller9a6308d2012-04-01 20:06:28 -04002123 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
Nicolas Dichtel2175d872016-04-22 17:31:21 +02002124 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
David S. Miller9a6308d2012-04-01 20:06:28 -04002125 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2126 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2127 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2128 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002129 {
2130 unsigned long now = jiffies;
Eric Dumazet9d027e32019-11-05 14:11:49 -08002131 long flush_delta = now - tbl->last_flush;
2132 long rand_delta = now - tbl->last_rand;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002133 struct neigh_hash_table *nht;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002134 struct ndt_config ndc = {
2135 .ndtc_key_len = tbl->key_len,
2136 .ndtc_entry_size = tbl->entry_size,
2137 .ndtc_entries = atomic_read(&tbl->entries),
2138 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
2139 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002140 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
2141 };
2142
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002143 rcu_read_lock_bh();
2144 nht = rcu_dereference_bh(tbl->nht);
David S. Miller2c2aba62011-12-28 15:06:58 -05002145 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
David S. Millercd089332011-07-11 01:28:12 -07002146 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002147 rcu_read_unlock_bh();
2148
David S. Miller9a6308d2012-04-01 20:06:28 -04002149 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2150 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002151 }
2152
2153 {
2154 int cpu;
2155 struct ndt_stats ndst;
2156
2157 memset(&ndst, 0, sizeof(ndst));
2158
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07002159 for_each_possible_cpu(cpu) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002160 struct neigh_statistics *st;
2161
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002162 st = per_cpu_ptr(tbl->stats, cpu);
2163 ndst.ndts_allocs += st->allocs;
2164 ndst.ndts_destroys += st->destroys;
2165 ndst.ndts_hash_grows += st->hash_grows;
2166 ndst.ndts_res_failed += st->res_failed;
2167 ndst.ndts_lookups += st->lookups;
2168 ndst.ndts_hits += st->hits;
2169 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
2170 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
2171 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
2172 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
Rick Jonesfb811392015-08-07 11:10:37 -07002173 ndst.ndts_table_fulls += st->table_fulls;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002174 }
2175
Nicolas Dichtelb6763382016-04-26 10:06:17 +02002176 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2177 NDTA_PAD))
David S. Miller9a6308d2012-04-01 20:06:28 -04002178 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002179 }
2180
2181 BUG_ON(tbl->parms.dev);
2182 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
Thomas Grafca860fb2006-08-07 18:00:18 -07002183 goto nla_put_failure;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002184
2185 read_unlock_bh(&tbl->lock);
Johannes Berg053c0952015-01-16 22:09:00 +01002186 nlmsg_end(skb, nlh);
2187 return 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002188
Thomas Grafca860fb2006-08-07 18:00:18 -07002189nla_put_failure:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002190 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08002191 nlmsg_cancel(skb, nlh);
2192 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002193}
2194
Thomas Grafca860fb2006-08-07 18:00:18 -07002195static int neightbl_fill_param_info(struct sk_buff *skb,
2196 struct neigh_table *tbl,
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002197 struct neigh_parms *parms,
Thomas Grafca860fb2006-08-07 18:00:18 -07002198 u32 pid, u32 seq, int type,
2199 unsigned int flags)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002200{
2201 struct ndtmsg *ndtmsg;
2202 struct nlmsghdr *nlh;
2203
Thomas Grafca860fb2006-08-07 18:00:18 -07002204 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2205 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002206 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002207
Thomas Grafca860fb2006-08-07 18:00:18 -07002208 ndtmsg = nlmsg_data(nlh);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002209
2210 read_lock_bh(&tbl->lock);
2211 ndtmsg->ndtm_family = tbl->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002212 ndtmsg->ndtm_pad1 = 0;
2213 ndtmsg->ndtm_pad2 = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002214
Thomas Grafca860fb2006-08-07 18:00:18 -07002215 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2216 neightbl_fill_parms(skb, parms) < 0)
2217 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002218
2219 read_unlock_bh(&tbl->lock);
Johannes Berg053c0952015-01-16 22:09:00 +01002220 nlmsg_end(skb, nlh);
2221 return 0;
Thomas Grafca860fb2006-08-07 18:00:18 -07002222errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002223 read_unlock_bh(&tbl->lock);
Patrick McHardy26932562007-01-31 23:16:40 -08002224 nlmsg_cancel(skb, nlh);
2225 return -EMSGSIZE;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002226}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002227
Patrick McHardyef7c79e2007-06-05 12:38:30 -07002228static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07002229 [NDTA_NAME] = { .type = NLA_STRING },
2230 [NDTA_THRESH1] = { .type = NLA_U32 },
2231 [NDTA_THRESH2] = { .type = NLA_U32 },
2232 [NDTA_THRESH3] = { .type = NLA_U32 },
2233 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
2234 [NDTA_PARMS] = { .type = NLA_NESTED },
2235};
2236
Patrick McHardyef7c79e2007-06-05 12:38:30 -07002237static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
Thomas Graf6b3f8672006-08-07 17:58:53 -07002238 [NDTPA_IFINDEX] = { .type = NLA_U32 },
2239 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
2240 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
2241 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
2242 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
2243 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002244 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 },
Thomas Graf6b3f8672006-08-07 17:58:53 -07002245 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
2246 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
2247 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
2248 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
2249 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
2250 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
2251 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
2252};
2253
David Ahernc21ef3e2017-04-16 09:48:24 -07002254static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2255 struct netlink_ext_ack *extack)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002256{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002257 struct net *net = sock_net(skb->sk);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002258 struct neigh_table *tbl;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002259 struct ndtmsg *ndtmsg;
2260 struct nlattr *tb[NDTA_MAX+1];
WANG Congd7480fd2014-11-10 15:59:36 -08002261 bool found = false;
2262 int err, tidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002263
Johannes Berg8cb08172019-04-26 14:07:28 +02002264 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2265 nl_neightbl_policy, extack);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002266 if (err < 0)
2267 goto errout;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002268
Thomas Graf6b3f8672006-08-07 17:58:53 -07002269 if (tb[NDTA_NAME] == NULL) {
2270 err = -EINVAL;
2271 goto errout;
2272 }
2273
2274 ndtmsg = nlmsg_data(nlh);
WANG Congd7480fd2014-11-10 15:59:36 -08002275
2276 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2277 tbl = neigh_tables[tidx];
2278 if (!tbl)
2279 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002280 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2281 continue;
WANG Congd7480fd2014-11-10 15:59:36 -08002282 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2283 found = true;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002284 break;
WANG Congd7480fd2014-11-10 15:59:36 -08002285 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002286 }
2287
WANG Congd7480fd2014-11-10 15:59:36 -08002288 if (!found)
2289 return -ENOENT;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002290
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002291 /*
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002292 * We acquire tbl->lock to be nice to the periodic timers and
2293 * make sure they always see a consistent set of values.
2294 */
2295 write_lock_bh(&tbl->lock);
2296
Thomas Graf6b3f8672006-08-07 17:58:53 -07002297 if (tb[NDTA_PARMS]) {
2298 struct nlattr *tbp[NDTPA_MAX+1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002299 struct neigh_parms *p;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002300 int i, ifindex = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002301
Johannes Berg8cb08172019-04-26 14:07:28 +02002302 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2303 tb[NDTA_PARMS],
2304 nl_ntbl_parm_policy, extack);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002305 if (err < 0)
2306 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002307
Thomas Graf6b3f8672006-08-07 17:58:53 -07002308 if (tbp[NDTPA_IFINDEX])
2309 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002310
Tobias Klauser97fd5bc2009-07-13 11:17:49 -07002311 p = lookup_neigh_parms(tbl, net, ifindex);
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002312 if (p == NULL) {
2313 err = -ENOENT;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002314 goto errout_tbl_lock;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002315 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002316
Thomas Graf6b3f8672006-08-07 17:58:53 -07002317 for (i = 1; i <= NDTPA_MAX; i++) {
2318 if (tbp[i] == NULL)
2319 continue;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002320
Thomas Graf6b3f8672006-08-07 17:58:53 -07002321 switch (i) {
2322 case NDTPA_QUEUE_LEN:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002323 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2324 nla_get_u32(tbp[i]) *
2325 SKB_TRUESIZE(ETH_FRAME_LEN));
Eric Dumazet8b5c1712011-11-09 12:07:14 +00002326 break;
2327 case NDTPA_QUEUE_LENBYTES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002328 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2329 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002330 break;
2331 case NDTPA_PROXY_QLEN:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002332 NEIGH_VAR_SET(p, PROXY_QLEN,
2333 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002334 break;
2335 case NDTPA_APP_PROBES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002336 NEIGH_VAR_SET(p, APP_PROBES,
2337 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002338 break;
2339 case NDTPA_UCAST_PROBES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002340 NEIGH_VAR_SET(p, UCAST_PROBES,
2341 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002342 break;
2343 case NDTPA_MCAST_PROBES:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002344 NEIGH_VAR_SET(p, MCAST_PROBES,
2345 nla_get_u32(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002346 break;
YOSHIFUJI Hideaki/吉藤英明8da86462015-03-19 22:41:46 +09002347 case NDTPA_MCAST_REPROBES:
2348 NEIGH_VAR_SET(p, MCAST_REPROBES,
2349 nla_get_u32(tbp[i]));
2350 break;
Thomas Graf6b3f8672006-08-07 17:58:53 -07002351 case NDTPA_BASE_REACHABLE_TIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002352 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2353 nla_get_msecs(tbp[i]));
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01002354				/* update reachable_time as well; otherwise the change will
2355				 * only take effect the next time neigh_periodic_work
2356				 * decides to recompute it (can be multiple minutes)
2357				 */
2358 p->reachable_time =
2359 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002360 break;
2361 case NDTPA_GC_STALETIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002362 NEIGH_VAR_SET(p, GC_STALETIME,
2363 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002364 break;
2365 case NDTPA_DELAY_PROBE_TIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002366 NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2367 nla_get_msecs(tbp[i]));
Ido Schimmel2a4501a2016-07-05 11:27:42 +02002368 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002369 break;
2370 case NDTPA_RETRANS_TIME:
Jiri Pirko1f9248e2013-12-07 19:26:53 +01002371 NEIGH_VAR_SET(p, RETRANS_TIME,
2372 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002373 break;
2374 case NDTPA_ANYCAST_DELAY:
Jiri Pirko39774582014-01-14 15:46:07 +01002375 NEIGH_VAR_SET(p, ANYCAST_DELAY,
2376 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002377 break;
2378 case NDTPA_PROXY_DELAY:
Jiri Pirko39774582014-01-14 15:46:07 +01002379 NEIGH_VAR_SET(p, PROXY_DELAY,
2380 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002381 break;
2382 case NDTPA_LOCKTIME:
Jiri Pirko39774582014-01-14 15:46:07 +01002383 NEIGH_VAR_SET(p, LOCKTIME,
2384 nla_get_msecs(tbp[i]));
Thomas Graf6b3f8672006-08-07 17:58:53 -07002385 break;
2386 }
2387 }
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002388 }
2389
Gao fengdc25c672013-06-20 10:01:34 +08002390 err = -ENOENT;
2391 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2392 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2393 !net_eq(net, &init_net))
2394 goto errout_tbl_lock;
2395
Thomas Graf6b3f8672006-08-07 17:58:53 -07002396 if (tb[NDTA_THRESH1])
2397 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2398
2399 if (tb[NDTA_THRESH2])
2400 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2401
2402 if (tb[NDTA_THRESH3])
2403 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2404
2405 if (tb[NDTA_GC_INTERVAL])
2406 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2407
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002408 err = 0;
2409
Thomas Graf6b3f8672006-08-07 17:58:53 -07002410errout_tbl_lock:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002411 write_unlock_bh(&tbl->lock);
Thomas Graf6b3f8672006-08-07 17:58:53 -07002412errout:
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002413 return err;
2414}
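/*
 * For orientation, a sketch of the request that neightbl_set() consumes,
 * assuming an IPv4 caller tuning one per-device parameter (the concrete
 * values below are placeholders, not taken from a real capture):
 *
 *	struct nlmsghdr		nlmsg_type  = RTM_SETNEIGHTBL
 *	struct ndtmsg		ndtm_family = AF_INET
 *	NDTA_NAME		"arp_cache"
 *	NDTA_PARMS (nested)
 *		NDTPA_IFINDEX		2
 *		NDTPA_GC_STALETIME	60000 (msecs)
 *
 * The table-wide knobs (NDTA_THRESH1..3, NDTA_GC_INTERVAL) are honoured
 * only in the initial network namespace, as enforced above.  iproute2's
 * "ip ntable change" is the usual producer of such requests.
 */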
2415
David Ahern9632d472018-10-07 20:16:37 -07002416static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2417 struct netlink_ext_ack *extack)
2418{
2419 struct ndtmsg *ndtm;
2420
2421 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2422 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2423 return -EINVAL;
2424 }
2425
2426 ndtm = nlmsg_data(nlh);
2427 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
2428 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2429 return -EINVAL;
2430 }
2431
2432 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2433 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2434 return -EINVAL;
2435 }
2436
2437 return 0;
2438}
2439
Thomas Grafc8822a42007-03-22 11:50:06 -07002440static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002441{
David Ahern9632d472018-10-07 20:16:37 -07002442 const struct nlmsghdr *nlh = cb->nlh;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002443 struct net *net = sock_net(skb->sk);
Thomas Grafca860fb2006-08-07 18:00:18 -07002444 int family, tidx, nidx = 0;
2445 int tbl_skip = cb->args[0];
2446 int neigh_skip = cb->args[1];
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002447 struct neigh_table *tbl;
2448
David Ahern9632d472018-10-07 20:16:37 -07002449 if (cb->strict_check) {
2450 int err = neightbl_valid_dump_info(nlh, cb->extack);
2451
2452 if (err < 0)
2453 return err;
2454 }
2455
2456 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002457
WANG Congd7480fd2014-11-10 15:59:36 -08002458 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002459 struct neigh_parms *p;
2460
WANG Congd7480fd2014-11-10 15:59:36 -08002461 tbl = neigh_tables[tidx];
2462 if (!tbl)
2463 continue;
2464
Thomas Grafca860fb2006-08-07 18:00:18 -07002465 if (tidx < tbl_skip || (family && tbl->family != family))
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002466 continue;
2467
Eric W. Biederman15e47302012-09-07 20:12:54 +00002468 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
David Ahern9632d472018-10-07 20:16:37 -07002469 nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
David S. Miller7b46a642015-01-18 23:36:08 -05002470 NLM_F_MULTI) < 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002471 break;
2472
Nicolas Dichtel75fbfd32014-10-29 19:29:31 +01002473 nidx = 0;
2474 p = list_next_entry(&tbl->parms, list);
2475 list_for_each_entry_from(p, &tbl->parms_list, list) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002476 if (!net_eq(neigh_parms_net(p), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08002477 continue;
2478
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002479 if (nidx < neigh_skip)
2480 goto next;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002481
Thomas Grafca860fb2006-08-07 18:00:18 -07002482 if (neightbl_fill_param_info(skb, tbl, p,
Eric W. Biederman15e47302012-09-07 20:12:54 +00002483 NETLINK_CB(cb->skb).portid,
David Ahern9632d472018-10-07 20:16:37 -07002484 nlh->nlmsg_seq,
Thomas Grafca860fb2006-08-07 18:00:18 -07002485 RTM_NEWNEIGHTBL,
David S. Miller7b46a642015-01-18 23:36:08 -05002486 NLM_F_MULTI) < 0)
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002487 goto out;
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002488 next:
2489 nidx++;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002490 }
2491
Thomas Grafca860fb2006-08-07 18:00:18 -07002492 neigh_skip = 0;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002493 }
2494out:
Thomas Grafca860fb2006-08-07 18:00:18 -07002495 cb->args[0] = tidx;
2496 cb->args[1] = nidx;
Thomas Grafc7fb64d2005-06-18 22:50:55 -07002497
2498 return skb->len;
2499}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
Thomas Graf8b8aec52006-08-07 17:56:37 -07002501static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2502 u32 pid, u32 seq, int type, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503{
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002504 u32 neigh_flags, neigh_flags_ext;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 unsigned long now = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 struct nda_cacheinfo ci;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002507 struct nlmsghdr *nlh;
2508 struct ndmsg *ndm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
Thomas Graf8b8aec52006-08-07 17:56:37 -07002510 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2511 if (nlh == NULL)
Patrick McHardy26932562007-01-31 23:16:40 -08002512 return -EMSGSIZE;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002513
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002514 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2515 neigh_flags = neigh->flags & NTF_OLD_MASK;
2516
Thomas Graf8b8aec52006-08-07 17:56:37 -07002517 ndm = nlmsg_data(nlh);
2518 ndm->ndm_family = neigh->ops->family;
Patrick McHardy9ef1d4c2005-06-28 12:55:30 -07002519 ndm->ndm_pad1 = 0;
2520 ndm->ndm_pad2 = 0;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002521 ndm->ndm_flags = neigh_flags;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002522 ndm->ndm_type = neigh->type;
2523 ndm->ndm_ifindex = neigh->dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524
David S. Miller9a6308d2012-04-01 20:06:28 -04002525 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2526 goto nla_put_failure;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002527
2528 read_lock_bh(&neigh->lock);
2529 ndm->ndm_state = neigh->nud_state;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +00002530 if (neigh->nud_state & NUD_VALID) {
2531 char haddr[MAX_ADDR_LEN];
2532
2533 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2534 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2535 read_unlock_bh(&neigh->lock);
2536 goto nla_put_failure;
2537 }
Thomas Graf8b8aec52006-08-07 17:56:37 -07002538 }
2539
Stephen Hemmingerb9f5f522008-06-03 16:03:15 -07002540 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2541 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2542 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
Reshetova, Elena9f237432017-06-30 13:07:55 +03002543 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002544 read_unlock_bh(&neigh->lock);
2545
David S. Miller9a6308d2012-04-01 20:06:28 -04002546 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2547 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2548 goto nla_put_failure;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002549
David Aherndf9b0e32018-12-15 14:09:06 -08002550 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2551 goto nla_put_failure;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002552 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2553 goto nla_put_failure;
David Aherndf9b0e32018-12-15 14:09:06 -08002554
Johannes Berg053c0952015-01-16 22:09:00 +01002555 nlmsg_end(skb, nlh);
2556 return 0;
Thomas Graf8b8aec52006-08-07 17:56:37 -07002557
2558nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08002559 nlmsg_cancel(skb, nlh);
2560 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561}
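/*
 * For reference, the RTM_NEWNEIGH message built above for a resolved IPv4
 * entry carries roughly the following (attribute presence depends on the
 * entry's state and flags):
 *
 *	struct ndmsg		family, ifindex, state, flags, type
 *	NDA_DST			tbl->key_len bytes (4 for IPv4)
 *	NDA_LLADDR		link-layer address, only while NUD_VALID
 *	NDA_CACHEINFO		struct nda_cacheinfo (used/confirmed/updated/refcnt)
 *	NDA_PROBES		u32
 *	NDA_PROTOCOL		u8, only if neigh->protocol is set
 *	NDA_FLAGS_EXT		u32, only if flags above NTF_EXT_SHIFT are set
 */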
2562
Tony Zelenoff84920c12012-01-26 22:28:58 +00002563static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2564 u32 pid, u32 seq, int type, unsigned int flags,
2565 struct neigh_table *tbl)
2566{
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002567 u32 neigh_flags, neigh_flags_ext;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002568 struct nlmsghdr *nlh;
2569 struct ndmsg *ndm;
2570
2571 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2572 if (nlh == NULL)
2573 return -EMSGSIZE;
2574
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002575 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2576 neigh_flags = pn->flags & NTF_OLD_MASK;
2577
Tony Zelenoff84920c12012-01-26 22:28:58 +00002578 ndm = nlmsg_data(nlh);
2579 ndm->ndm_family = tbl->family;
2580 ndm->ndm_pad1 = 0;
2581 ndm->ndm_pad2 = 0;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002582 ndm->ndm_flags = neigh_flags | NTF_PROXY;
Jun Zhao545469f2014-07-26 00:38:59 +08002583 ndm->ndm_type = RTN_UNICAST;
Konstantin Khlebnikov6adc5fd2015-12-01 01:14:48 +03002584 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002585 ndm->ndm_state = NUD_NONE;
2586
David S. Miller9a6308d2012-04-01 20:06:28 -04002587 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2588 goto nla_put_failure;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002589
David Aherndf9b0e32018-12-15 14:09:06 -08002590 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2591 goto nla_put_failure;
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002592 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2593 goto nla_put_failure;
David Aherndf9b0e32018-12-15 14:09:06 -08002594
Johannes Berg053c0952015-01-16 22:09:00 +01002595 nlmsg_end(skb, nlh);
2596 return 0;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002597
2598nla_put_failure:
2599 nlmsg_cancel(skb, nlh);
2600 return -EMSGSIZE;
2601}
2602
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07002603static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
Thomas Grafd961db32007-08-08 23:12:56 -07002604{
2605 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07002606 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
Thomas Grafd961db32007-08-08 23:12:56 -07002607}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608
David Ahern21fdd092015-09-29 09:32:03 -07002609static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2610{
2611 struct net_device *master;
2612
2613 if (!master_idx)
2614 return false;
2615
Eric Dumazetaab456d2018-10-26 09:33:27 -07002616 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
Lahav Schlesingerd3432bf2021-08-10 09:06:58 +00002617
2618	/* 0 already denotes that NDA_MASTER wasn't passed, so another
2619	 * invalid ifindex value (-1) is needed to denote "no master".
2620	 */
2621 if (master_idx == -1)
2622 return !!master;
2623
David Ahern21fdd092015-09-29 09:32:03 -07002624 if (!master || master->ifindex != master_idx)
2625 return true;
2626
2627 return false;
2628}
2629
David Ahern16660f02015-10-03 11:43:46 -07002630static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2631{
Eric Dumazetaab456d2018-10-26 09:33:27 -07002632 if (filter_idx && (!dev || dev->ifindex != filter_idx))
David Ahern16660f02015-10-03 11:43:46 -07002633 return true;
2634
2635 return false;
2636}
2637
David Ahern6f52f802018-10-03 15:33:12 -07002638struct neigh_dump_filter {
2639 int master_idx;
2640 int dev_idx;
2641};
2642
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
David Ahern6f52f802018-10-03 15:33:12 -07002644 struct netlink_callback *cb,
2645 struct neigh_dump_filter *filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646{
Eric Dumazet767e97e2010-10-06 17:49:21 -07002647 struct net *net = sock_net(skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 struct neighbour *n;
2649 int rc, h, s_h = cb->args[1];
2650 int idx, s_idx = idx = cb->args[2];
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002651 struct neigh_hash_table *nht;
David Ahern21fdd092015-09-29 09:32:03 -07002652 unsigned int flags = NLM_F_MULTI;
David Ahern21fdd092015-09-29 09:32:03 -07002653
David Ahern6f52f802018-10-03 15:33:12 -07002654 if (filter->dev_idx || filter->master_idx)
2655 flags |= NLM_F_DUMP_FILTERED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002657 rcu_read_lock_bh();
2658 nht = rcu_dereference_bh(tbl->nht);
2659
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002660 for (h = s_h; h < (1 << nht->hash_shift); h++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661 if (h > s_h)
2662 s_idx = 0;
Eric Dumazet767e97e2010-10-06 17:49:21 -07002663 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2664 n != NULL;
2665 n = rcu_dereference_bh(n->next)) {
Zhang Shengju18502ac2016-11-30 11:24:42 +08002666 if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2667 goto next;
David Ahern6f52f802018-10-03 15:33:12 -07002668 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2669 neigh_master_filtered(n->dev, filter->master_idx))
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002670 goto next;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002671 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 cb->nlh->nlmsg_seq,
Jamal Hadi Salimb6544c02005-06-18 22:54:12 -07002673 RTM_NEWNEIGH,
David Ahern21fdd092015-09-29 09:32:03 -07002674 flags) < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675 rc = -1;
2676 goto out;
2677 }
Eric Dumazet767e97e2010-10-06 17:49:21 -07002678next:
Gautam Kachrooefc683f2009-02-06 00:52:04 -08002679 idx++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 }
2682 rc = skb->len;
2683out:
Eric Dumazetd6bf7812010-10-04 06:15:44 +00002684 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 cb->args[1] = h;
2686 cb->args[2] = idx;
2687 return rc;
2688}
2689
Tony Zelenoff84920c12012-01-26 22:28:58 +00002690static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
David Ahern6f52f802018-10-03 15:33:12 -07002691 struct netlink_callback *cb,
2692 struct neigh_dump_filter *filter)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002693{
2694 struct pneigh_entry *n;
2695 struct net *net = sock_net(skb->sk);
2696 int rc, h, s_h = cb->args[3];
2697 int idx, s_idx = idx = cb->args[4];
David Ahern6f52f802018-10-03 15:33:12 -07002698 unsigned int flags = NLM_F_MULTI;
2699
2700 if (filter->dev_idx || filter->master_idx)
2701 flags |= NLM_F_DUMP_FILTERED;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002702
2703 read_lock_bh(&tbl->lock);
2704
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002705 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
Tony Zelenoff84920c12012-01-26 22:28:58 +00002706 if (h > s_h)
2707 s_idx = 0;
2708 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
Zhang Shengju18502ac2016-11-30 11:24:42 +08002709 if (idx < s_idx || pneigh_net(n) != net)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002710 goto next;
David Ahern6f52f802018-10-03 15:33:12 -07002711 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2712 neigh_master_filtered(n->dev, filter->master_idx))
2713 goto next;
Eric W. Biederman15e47302012-09-07 20:12:54 +00002714 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
Tony Zelenoff84920c12012-01-26 22:28:58 +00002715 cb->nlh->nlmsg_seq,
David Ahern6f52f802018-10-03 15:33:12 -07002716 RTM_NEWNEIGH, flags, tbl) < 0) {
Tony Zelenoff84920c12012-01-26 22:28:58 +00002717 read_unlock_bh(&tbl->lock);
2718 rc = -1;
2719 goto out;
2720 }
2721 next:
2722 idx++;
2723 }
2724 }
2725
2726 read_unlock_bh(&tbl->lock);
2727 rc = skb->len;
2728out:
2729 cb->args[3] = h;
2730 cb->args[4] = idx;
2731 return rc;
2732
2733}
2734
David Ahern51183d22018-10-07 20:16:36 -07002735static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2736 bool strict_check,
2737 struct neigh_dump_filter *filter,
2738 struct netlink_ext_ack *extack)
2739{
2740 struct nlattr *tb[NDA_MAX + 1];
2741 int err, i;
2742
2743 if (strict_check) {
2744 struct ndmsg *ndm;
2745
2746 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2747 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2748 return -EINVAL;
2749 }
2750
2751 ndm = nlmsg_data(nlh);
2752 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
David Ahernc0fde872018-12-19 16:54:38 -08002753 ndm->ndm_state || ndm->ndm_type) {
David Ahern51183d22018-10-07 20:16:36 -07002754 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2755 return -EINVAL;
2756 }
2757
David Ahernc0fde872018-12-19 16:54:38 -08002758 if (ndm->ndm_flags & ~NTF_PROXY) {
2759 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2760 return -EINVAL;
2761 }
2762
Johannes Berg8cb08172019-04-26 14:07:28 +02002763 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2764 tb, NDA_MAX, nda_policy,
2765 extack);
David Ahern51183d22018-10-07 20:16:36 -07002766 } else {
Johannes Berg8cb08172019-04-26 14:07:28 +02002767 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2768 NDA_MAX, nda_policy, extack);
David Ahern51183d22018-10-07 20:16:36 -07002769 }
2770 if (err < 0)
2771 return err;
2772
2773 for (i = 0; i <= NDA_MAX; ++i) {
2774 if (!tb[i])
2775 continue;
2776
2777 /* all new attributes should require strict_check */
2778 switch (i) {
2779 case NDA_IFINDEX:
David Ahern51183d22018-10-07 20:16:36 -07002780 filter->dev_idx = nla_get_u32(tb[i]);
2781 break;
2782 case NDA_MASTER:
David Ahern51183d22018-10-07 20:16:36 -07002783 filter->master_idx = nla_get_u32(tb[i]);
2784 break;
2785 default:
2786 if (strict_check) {
2787 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2788 return -EINVAL;
2789 }
2790 }
2791 }
2792
2793 return 0;
2794}
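/*
 * A minimal userspace sketch of a dump request that passes the strict
 * checks above: only ndm_family is filled in, every other header field
 * stays zero and no attributes beyond NDA_IFINDEX/NDA_MASTER are attached
 * (error handling trimmed for brevity):
 *
 *	#include <linux/rtnetlink.h>
 *	#include <linux/neighbour.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ndmsg	ndm;
 *	} req;
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg));
 *	req.nlh.nlmsg_type  = RTM_GETNEIGH;
 *	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *	req.ndm.ndm_family  = AF_INET;
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *
 * The RTM_NEWNEIGH multipart replies are then read back with recv().
 */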
2795
Thomas Grafc8822a42007-03-22 11:50:06 -07002796static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797{
David Ahern6f52f802018-10-03 15:33:12 -07002798 const struct nlmsghdr *nlh = cb->nlh;
2799 struct neigh_dump_filter filter = {};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 struct neigh_table *tbl;
2801 int t, family, s_t;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002802 int proxy = 0;
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002803 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
David Ahern6f52f802018-10-03 15:33:12 -07002805 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
Tony Zelenoff84920c12012-01-26 22:28:58 +00002806
2807 /* check for full ndmsg structure presence, family member is
2808 * the same for both structures
2809 */
David Ahern6f52f802018-10-03 15:33:12 -07002810 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2811 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
Tony Zelenoff84920c12012-01-26 22:28:58 +00002812 proxy = 1;
2813
David Ahern51183d22018-10-07 20:16:36 -07002814 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2815 if (err < 0 && cb->strict_check)
2816 return err;
2817
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 s_t = cb->args[0];
2819
WANG Congd7480fd2014-11-10 15:59:36 -08002820 for (t = 0; t < NEIGH_NR_TABLES; t++) {
2821 tbl = neigh_tables[t];
2822
2823 if (!tbl)
2824 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 if (t < s_t || (family && tbl->family != family))
2826 continue;
2827 if (t > s_t)
2828 memset(&cb->args[1], 0, sizeof(cb->args) -
2829 sizeof(cb->args[0]));
Tony Zelenoff84920c12012-01-26 22:28:58 +00002830 if (proxy)
David Ahern6f52f802018-10-03 15:33:12 -07002831 err = pneigh_dump_table(tbl, skb, cb, &filter);
Tony Zelenoff84920c12012-01-26 22:28:58 +00002832 else
David Ahern6f52f802018-10-03 15:33:12 -07002833 err = neigh_dump_table(tbl, skb, cb, &filter);
Eric Dumazet4bd6683b2012-06-07 04:58:35 +00002834 if (err < 0)
2835 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
2838 cb->args[0] = t;
2839 return skb->len;
2840}
2841
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002842static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2843 struct neigh_table **tbl,
2844 void **dst, int *dev_idx, u8 *ndm_flags,
2845 struct netlink_ext_ack *extack)
2846{
2847 struct nlattr *tb[NDA_MAX + 1];
2848 struct ndmsg *ndm;
2849 int err, i;
2850
2851 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2852 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2853 return -EINVAL;
2854 }
2855
2856 ndm = nlmsg_data(nlh);
2857 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
2858 ndm->ndm_type) {
2859 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2860 return -EINVAL;
2861 }
2862
2863 if (ndm->ndm_flags & ~NTF_PROXY) {
2864 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2865 return -EINVAL;
2866 }
2867
Johannes Berg8cb08172019-04-26 14:07:28 +02002868 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2869 NDA_MAX, nda_policy, extack);
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002870 if (err < 0)
2871 return err;
2872
2873 *ndm_flags = ndm->ndm_flags;
2874 *dev_idx = ndm->ndm_ifindex;
2875 *tbl = neigh_find_table(ndm->ndm_family);
2876 if (*tbl == NULL) {
2877 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2878 return -EAFNOSUPPORT;
2879 }
2880
2881 for (i = 0; i <= NDA_MAX; ++i) {
2882 if (!tb[i])
2883 continue;
2884
2885 switch (i) {
2886 case NDA_DST:
2887 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2888 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2889 return -EINVAL;
2890 }
2891 *dst = nla_data(tb[i]);
2892 break;
2893 default:
2894 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2895 return -EINVAL;
2896 }
2897 }
2898
2899 return 0;
2900}
2901
2902static inline size_t neigh_nlmsg_size(void)
2903{
2904 return NLMSG_ALIGN(sizeof(struct ndmsg))
2905 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2906 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2907 + nla_total_size(sizeof(struct nda_cacheinfo))
2908 + nla_total_size(4) /* NDA_PROBES */
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002909 + nla_total_size(4) /* NDA_FLAGS_EXT */
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002910 + nla_total_size(1); /* NDA_PROTOCOL */
2911}
2912
2913static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2914 u32 pid, u32 seq)
2915{
2916 struct sk_buff *skb;
2917 int err = 0;
2918
2919 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2920 if (!skb)
2921 return -ENOBUFS;
2922
2923 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2924 if (err) {
2925 kfree_skb(skb);
2926 goto errout;
2927 }
2928
2929 err = rtnl_unicast(skb, net, pid);
2930errout:
2931 return err;
2932}
2933
2934static inline size_t pneigh_nlmsg_size(void)
2935{
2936 return NLMSG_ALIGN(sizeof(struct ndmsg))
Colin Ian King463561e2018-12-20 16:50:50 +00002937 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
Roopa Prabhu2c611ad2021-10-11 14:12:37 +02002938 + nla_total_size(4) /* NDA_FLAGS_EXT */
Roopa Prabhu82cbb5c2018-12-19 12:51:38 -08002939 + nla_total_size(1); /* NDA_PROTOCOL */
2940}
2941
2942static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2943 u32 pid, u32 seq, struct neigh_table *tbl)
2944{
2945 struct sk_buff *skb;
2946 int err = 0;
2947
2948 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2949 if (!skb)
2950 return -ENOBUFS;
2951
2952 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2953 if (err) {
2954 kfree_skb(skb);
2955 goto errout;
2956 }
2957
2958 err = rtnl_unicast(skb, net, pid);
2959errout:
2960 return err;
2961}
2962
2963static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2964 struct netlink_ext_ack *extack)
2965{
2966 struct net *net = sock_net(in_skb->sk);
2967 struct net_device *dev = NULL;
2968 struct neigh_table *tbl = NULL;
2969 struct neighbour *neigh;
2970 void *dst = NULL;
2971 u8 ndm_flags = 0;
2972 int dev_idx = 0;
2973 int err;
2974
2975 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2976 extack);
2977 if (err < 0)
2978 return err;
2979
2980 if (dev_idx) {
2981 dev = __dev_get_by_index(net, dev_idx);
2982 if (!dev) {
2983 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2984 return -ENODEV;
2985 }
2986 }
2987
2988 if (!dst) {
2989 NL_SET_ERR_MSG(extack, "Network address not specified");
2990 return -EINVAL;
2991 }
2992
2993 if (ndm_flags & NTF_PROXY) {
2994 struct pneigh_entry *pn;
2995
2996 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2997 if (!pn) {
2998 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2999 return -ENOENT;
3000 }
3001 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3002 nlh->nlmsg_seq, tbl);
3003 }
3004
3005 if (!dev) {
3006 NL_SET_ERR_MSG(extack, "No device specified");
3007 return -EINVAL;
3008 }
3009
3010 neigh = neigh_lookup(tbl, dst, dev);
3011 if (!neigh) {
3012 NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3013 return -ENOENT;
3014 }
3015
3016 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3017 nlh->nlmsg_seq);
3018
3019 neigh_release(neigh);
3020
3021 return err;
3022}
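/*
 * This is the handler behind lookups such as iproute2's
 * "ip neigh get 192.0.2.1 dev eth0": NDA_DST supplies the key,
 * ndm_ifindex names the device, and NTF_PROXY in ndm_flags redirects the
 * lookup to the proxy (pneigh) table instead.
 */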
3023
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3025{
3026 int chain;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003027 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003029 rcu_read_lock_bh();
3030 nht = rcu_dereference_bh(tbl->nht);
3031
Eric Dumazet767e97e2010-10-06 17:49:21 -07003032 read_lock(&tbl->lock); /* avoid resizes */
David S. Millercd089332011-07-11 01:28:12 -07003033 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 struct neighbour *n;
3035
Eric Dumazet767e97e2010-10-06 17:49:21 -07003036 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
3037 n != NULL;
3038 n = rcu_dereference_bh(n->next))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 cb(n, cookie);
3040 }
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003041 read_unlock(&tbl->lock);
3042 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043}
3044EXPORT_SYMBOL(neigh_for_each);
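/*
 * Usage sketch (hypothetical caller): the callback runs under
 * rcu_read_lock_bh() and the table read lock, so it must not sleep.
 * Counting the entries of the ARP table, for example:
 *
 *	static void count_entry(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *
 *	neigh_for_each(&arp_tbl, count_entry, &count);
 */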
3045
3046/* The tbl->lock must be held as a writer and BH disabled. */
3047void __neigh_for_each_release(struct neigh_table *tbl,
3048 int (*cb)(struct neighbour *))
3049{
3050 int chain;
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003051 struct neigh_hash_table *nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003053 nht = rcu_dereference_protected(tbl->nht,
3054 lockdep_is_held(&tbl->lock));
David S. Millercd089332011-07-11 01:28:12 -07003055 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07003056 struct neighbour *n;
3057 struct neighbour __rcu **np;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003059 np = &nht->hash_buckets[chain];
Eric Dumazet767e97e2010-10-06 17:49:21 -07003060 while ((n = rcu_dereference_protected(*np,
3061 lockdep_is_held(&tbl->lock))) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062 int release;
3063
3064 write_lock(&n->lock);
3065 release = cb(n);
3066 if (release) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07003067 rcu_assign_pointer(*np,
3068 rcu_dereference_protected(n->next,
3069 lockdep_is_held(&tbl->lock)));
David Ahern58956312018-12-07 12:24:57 -08003070 neigh_mark_dead(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 } else
3072 np = &n->next;
3073 write_unlock(&n->lock);
Thomas Graf4f494552007-08-08 23:12:36 -07003074 if (release)
3075 neigh_cleanup_and_release(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 }
3077 }
3078}
3079EXPORT_SYMBOL(__neigh_for_each_release);
3080
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06003081int neigh_xmit(int index, struct net_device *dev,
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003082 const void *addr, struct sk_buff *skb)
3083{
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06003084 int err = -EAFNOSUPPORT;
3085 if (likely(index < NEIGH_NR_TABLES)) {
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003086 struct neigh_table *tbl;
3087 struct neighbour *neigh;
3088
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06003089 tbl = neigh_tables[index];
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003090 if (!tbl)
3091 goto out;
David Barrosob560f032016-06-28 11:16:43 +03003092 rcu_read_lock_bh();
David Ahern4b2a2bf2019-05-01 18:18:42 -07003093 if (index == NEIGH_ARP_TABLE) {
3094 u32 key = *((u32 *)addr);
3095
3096 neigh = __ipv4_neigh_lookup_noref(dev, key);
3097 } else {
3098 neigh = __neigh_lookup_noref(tbl, addr, dev);
3099 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003100 if (!neigh)
3101 neigh = __neigh_create(tbl, addr, dev, false);
3102 err = PTR_ERR(neigh);
David Barrosob560f032016-06-28 11:16:43 +03003103 if (IS_ERR(neigh)) {
3104 rcu_read_unlock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003105 goto out_kfree_skb;
David Barrosob560f032016-06-28 11:16:43 +03003106 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003107 err = neigh->output(neigh, skb);
David Barrosob560f032016-06-28 11:16:43 +03003108 rcu_read_unlock_bh();
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003109 }
Eric W. Biedermanb79bda32015-03-07 16:25:56 -06003110 else if (index == NEIGH_LINK_TABLE) {
3111 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3112 addr, NULL, skb->len);
3113 if (err < 0)
3114 goto out_kfree_skb;
3115 err = dev_queue_xmit(skb);
3116 }
Eric W. Biederman4fd3d7d2015-03-03 17:11:16 -06003117out:
3118 return err;
3119out_kfree_skb:
3120 kfree_skb(skb);
3121 goto out;
3122}
3123EXPORT_SYMBOL(neigh_xmit);
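/*
 * Usage sketch (hypothetical caller such as an encapsulating driver), with
 * "nexthop" standing in for the next-hop IPv4 address:
 *
 *	__be32 nexthop = ...;
 *	int err = neigh_xmit(NEIGH_ARP_TABLE, dev, &nexthop, skb);
 *
 * For NEIGH_ARP_TABLE/NEIGH_ND_TABLE the entry is looked up (or created)
 * and the skb handed to neigh->output(); NEIGH_LINK_TABLE skips resolution
 * and builds the link-layer header directly before dev_queue_xmit().  The
 * skb is freed on the error paths above.
 */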
3124
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125#ifdef CONFIG_PROC_FS
3126
3127static struct neighbour *neigh_get_first(struct seq_file *seq)
3128{
3129 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003130 struct net *net = seq_file_net(seq);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003131 struct neigh_hash_table *nht = state->nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 struct neighbour *n = NULL;
Colin Ian Kingf530eed2019-07-26 10:46:11 +01003133 int bucket;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134
3135 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
David S. Millercd089332011-07-11 01:28:12 -07003136 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
Eric Dumazet767e97e2010-10-06 17:49:21 -07003137 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138
3139 while (n) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003140 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003141 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 if (state->neigh_sub_iter) {
3143 loff_t fakep = 0;
3144 void *v;
3145
3146 v = state->neigh_sub_iter(state, n, &fakep);
3147 if (!v)
3148 goto next;
3149 }
3150 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3151 break;
3152 if (n->nud_state & ~NUD_NOARP)
3153 break;
Eric Dumazet767e97e2010-10-06 17:49:21 -07003154next:
3155 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 }
3157
3158 if (n)
3159 break;
3160 }
3161 state->bucket = bucket;
3162
3163 return n;
3164}
3165
3166static struct neighbour *neigh_get_next(struct seq_file *seq,
3167 struct neighbour *n,
3168 loff_t *pos)
3169{
3170 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003171 struct net *net = seq_file_net(seq);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003172 struct neigh_hash_table *nht = state->nht;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173
3174 if (state->neigh_sub_iter) {
3175 void *v = state->neigh_sub_iter(state, n, pos);
3176 if (v)
3177 return n;
3178 }
Eric Dumazet767e97e2010-10-06 17:49:21 -07003179 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180
3181 while (1) {
3182 while (n) {
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003183 if (!net_eq(dev_net(n->dev), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003184 goto next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 if (state->neigh_sub_iter) {
3186 void *v = state->neigh_sub_iter(state, n, pos);
3187 if (v)
3188 return n;
3189 goto next;
3190 }
3191 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3192 break;
3193
3194 if (n->nud_state & ~NUD_NOARP)
3195 break;
Eric Dumazet767e97e2010-10-06 17:49:21 -07003196next:
3197 n = rcu_dereference_bh(n->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 }
3199
3200 if (n)
3201 break;
3202
David S. Millercd089332011-07-11 01:28:12 -07003203 if (++state->bucket >= (1 << nht->hash_shift))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204 break;
3205
Eric Dumazet767e97e2010-10-06 17:49:21 -07003206 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 }
3208
3209 if (n && pos)
3210 --(*pos);
3211 return n;
3212}
3213
3214static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3215{
3216 struct neighbour *n = neigh_get_first(seq);
3217
3218 if (n) {
Chris Larson745e2032008-08-03 01:10:55 -07003219 --(*pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 while (*pos) {
3221 n = neigh_get_next(seq, n, pos);
3222 if (!n)
3223 break;
3224 }
3225 }
3226 return *pos ? NULL : n;
3227}
3228
3229static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3230{
3231 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003232 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 struct neigh_table *tbl = state->tbl;
3234 struct pneigh_entry *pn = NULL;
Yang Li48de7c02021-05-08 18:03:05 +08003235 int bucket;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
3237 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3238 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3239 pn = tbl->phash_buckets[bucket];
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003240 while (pn && !net_eq(pneigh_net(pn), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003241 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242 if (pn)
3243 break;
3244 }
3245 state->bucket = bucket;
3246
3247 return pn;
3248}
3249
3250static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3251 struct pneigh_entry *pn,
3252 loff_t *pos)
3253{
3254 struct neigh_seq_state *state = seq->private;
YOSHIFUJI Hideaki12188542008-03-26 02:36:06 +09003255 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 struct neigh_table *tbl = state->tbl;
3257
Jorge Boncompte [DTI2]df07a942011-11-25 13:24:49 -05003258 do {
3259 pn = pn->next;
3260 } while (pn && !net_eq(pneigh_net(pn), net));
3261
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 while (!pn) {
3263 if (++state->bucket > PNEIGH_HASHMASK)
3264 break;
3265 pn = tbl->phash_buckets[state->bucket];
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09003266 while (pn && !net_eq(pneigh_net(pn), net))
Eric W. Biederman426b5302008-01-24 00:13:18 -08003267 pn = pn->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268 if (pn)
3269 break;
3270 }
3271
3272 if (pn && pos)
3273 --(*pos);
3274
3275 return pn;
3276}
3277
3278static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3279{
3280 struct pneigh_entry *pn = pneigh_get_first(seq);
3281
3282 if (pn) {
Chris Larson745e2032008-08-03 01:10:55 -07003283 --(*pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284 while (*pos) {
3285 pn = pneigh_get_next(seq, pn, pos);
3286 if (!pn)
3287 break;
3288 }
3289 }
3290 return *pos ? NULL : pn;
3291}
3292
3293static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3294{
3295 struct neigh_seq_state *state = seq->private;
3296 void *rc;
Chris Larson745e2032008-08-03 01:10:55 -07003297 loff_t idxpos = *pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298
Chris Larson745e2032008-08-03 01:10:55 -07003299 rc = neigh_get_idx(seq, &idxpos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
Chris Larson745e2032008-08-03 01:10:55 -07003301 rc = pneigh_get_idx(seq, &idxpos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302
3303 return rc;
3304}
3305
3306void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
Eric Dumazetf3e92cb2019-06-15 16:28:48 -07003307 __acquires(tbl->lock)
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003308 __acquires(rcu_bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309{
3310 struct neigh_seq_state *state = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311
3312 state->tbl = tbl;
3313 state->bucket = 0;
3314 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3315
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003316 rcu_read_lock_bh();
3317 state->nht = rcu_dereference_bh(tbl->nht);
Eric Dumazetf3e92cb2019-06-15 16:28:48 -07003318 read_lock(&tbl->lock);
Eric Dumazet767e97e2010-10-06 17:49:21 -07003319
Chris Larson745e2032008-08-03 01:10:55 -07003320 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321}
3322EXPORT_SYMBOL(neigh_seq_start);
3323
3324void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3325{
3326 struct neigh_seq_state *state;
3327 void *rc;
3328
3329 if (v == SEQ_START_TOKEN) {
Chris Larsonbff69732008-08-03 01:02:41 -07003330 rc = neigh_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 goto out;
3332 }
3333
3334 state = seq->private;
3335 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3336 rc = neigh_get_next(seq, v, NULL);
3337 if (rc)
3338 goto out;
3339 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3340 rc = pneigh_get_first(seq);
3341 } else {
3342 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3343 rc = pneigh_get_next(seq, v, NULL);
3344 }
3345out:
3346 ++(*pos);
3347 return rc;
3348}
3349EXPORT_SYMBOL(neigh_seq_next);
3350
3351void neigh_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetf3e92cb2019-06-15 16:28:48 -07003352 __releases(tbl->lock)
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003353 __releases(rcu_bh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354{
Eric Dumazetf3e92cb2019-06-15 16:28:48 -07003355 struct neigh_seq_state *state = seq->private;
3356 struct neigh_table *tbl = state->tbl;
3357
3358 read_unlock(&tbl->lock);
Eric Dumazetd6bf7812010-10-04 06:15:44 +00003359 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360}
3361EXPORT_SYMBOL(neigh_seq_stop);
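/*
 * These helpers are meant to be wired into a protocol's seq_file
 * operations; IPv4 ARP, for instance, begins its /proc/net/arp walk with
 * something along the lines of:
 *
 *	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
 *
 * so NUD_NOARP entries are skipped while RCU and the table lock stay held
 * for the duration of the dump.
 */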
3362
3363/* statistics via seq_file */
3364
3365static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3366{
Muchun Song359745d2022-01-21 22:14:23 -08003367 struct neigh_table *tbl = pde_data(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 int cpu;
3369
3370 if (*pos == 0)
3371 return SEQ_START_TOKEN;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003372
Rusty Russell0f23174a2008-12-29 12:23:42 +00003373 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 if (!cpu_possible(cpu))
3375 continue;
3376 *pos = cpu+1;
3377 return per_cpu_ptr(tbl->stats, cpu);
3378 }
3379 return NULL;
3380}
3381
3382static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3383{
Muchun Song359745d2022-01-21 22:14:23 -08003384 struct neigh_table *tbl = pde_data(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 int cpu;
3386
Rusty Russell0f23174a2008-12-29 12:23:42 +00003387 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 if (!cpu_possible(cpu))
3389 continue;
3390 *pos = cpu+1;
3391 return per_cpu_ptr(tbl->stats, cpu);
3392 }
Vasily Averin1e3f9f02020-01-23 10:11:28 +03003393 (*pos)++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 return NULL;
3395}
3396
3397static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3398{
3399
3400}
3401
3402static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3403{
Muchun Song359745d2022-01-21 22:14:23 -08003404 struct neigh_table *tbl = pde_data(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405 struct neigh_statistics *st = v;
3406
3407 if (v == SEQ_START_TOKEN) {
Yajun Deng0547ffe2021-08-02 16:05:08 +08003408 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 return 0;
3410 }
3411
Yajun Deng0547ffe2021-08-02 16:05:08 +08003412 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
3413 "%08lx %08lx %08lx "
3414 "%08lx %08lx %08lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 atomic_read(&tbl->entries),
3416
3417 st->allocs,
3418 st->destroys,
3419 st->hash_grows,
3420
3421 st->lookups,
3422 st->hits,
3423
3424 st->res_failed,
3425
3426 st->rcv_probes_mcast,
3427 st->rcv_probes_ucast,
3428
3429 st->periodic_gc_runs,
Neil Horman9a6d2762008-07-16 20:50:49 -07003430 st->forced_gc_runs,
Rick Jonesfb811392015-08-07 11:10:37 -07003431 st->unres_discards,
3432 st->table_fulls
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 );
3434
3435 return 0;
3436}
3437
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003438static const struct seq_operations neigh_stat_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 .start = neigh_stat_seq_start,
3440 .next = neigh_stat_seq_next,
3441 .stop = neigh_stat_seq_stop,
3442 .show = neigh_stat_seq_show,
3443};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444#endif /* CONFIG_PROC_FS */
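/*
 * With CONFIG_PROC_FS enabled these statistics surface under
 * /proc/net/stat/ (e.g. /proc/net/stat/arp_cache and
 * /proc/net/stat/ndisc_cache), one row per possible CPU with every counter
 * printed in hex.
 */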
3445
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003446static void __neigh_notify(struct neighbour *n, int type, int flags,
3447 u32 pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003449 struct net *net = dev_net(n->dev);
Thomas Graf8b8aec52006-08-07 17:56:37 -07003450 struct sk_buff *skb;
Thomas Grafb8673312006-08-15 00:33:14 -07003451 int err = -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452
Thomas Graf339bf982006-11-10 14:10:15 -08003453 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
Thomas Graf8b8aec52006-08-07 17:56:37 -07003454 if (skb == NULL)
Thomas Grafb8673312006-08-15 00:33:14 -07003455 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003457 err = neigh_fill_info(skb, n, pid, 0, type, flags);
Patrick McHardy26932562007-01-31 23:16:40 -08003458 if (err < 0) {
3459 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3460 WARN_ON(err == -EMSGSIZE);
3461 kfree_skb(skb);
3462 goto errout;
3463 }
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08003464 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3465 return;
Thomas Grafb8673312006-08-15 00:33:14 -07003466errout:
3467 if (err < 0)
Eric W. Biederman426b5302008-01-24 00:13:18 -08003468 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
Thomas Grafb8673312006-08-15 00:33:14 -07003469}
3470
3471void neigh_app_ns(struct neighbour *n)
3472{
Roopa Prabhu7b8f7a42017-03-19 22:01:28 -07003473 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474}
YOSHIFUJI Hideaki0a204502008-03-24 18:39:10 +09003475EXPORT_SYMBOL(neigh_app_ns);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476
3477#ifdef CONFIG_SYSCTL
Cong Wangb93196d2012-12-06 10:04:04 +08003478static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479
Joe Perchesfe2c6332013-06-11 23:04:25 -07003480static int proc_unres_qlen(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003481 void *buffer, size_t *lenp, loff_t *ppos)
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003482{
3483 int size, ret;
Joe Perchesfe2c6332013-06-11 23:04:25 -07003484 struct ctl_table tmp = *ctl;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003485
Matteo Croceeec48442019-07-18 15:58:50 -07003486 tmp.extra1 = SYSCTL_ZERO;
Shan Weice46cc62012-12-04 18:49:15 +00003487 tmp.extra2 = &unres_qlen_max;
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003488 tmp.data = &size;
Shan Weice46cc62012-12-04 18:49:15 +00003489
3490 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3491 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3492
Eric Dumazet8b5c1712011-11-09 12:07:14 +00003493 if (write && !ret)
3494 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3495 return ret;
3496}
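/*
 * The legacy "unres_qlen" sysctl is expressed in packets while the kernel
 * stores bytes: a write of N becomes N * SKB_TRUESIZE(ETH_FRAME_LEN) in
 * QUEUE_LEN_BYTES, and a read converts the stored byte count back to
 * packets.
 */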
3497
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003498static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3499 int family)
3500{
Jiri Pirkobba24892013-12-07 19:26:57 +01003501 switch (family) {
3502 case AF_INET:
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003503 return __in_dev_arp_parms_get_rcu(dev);
Jiri Pirkobba24892013-12-07 19:26:57 +01003504 case AF_INET6:
3505 return __in6_dev_nd_parms_get_rcu(dev);
3506 }
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003507 return NULL;
3508}
3509
3510static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3511 int index)
3512{
3513 struct net_device *dev;
3514 int family = neigh_parms_family(p);
3515
3516 rcu_read_lock();
3517 for_each_netdev_rcu(net, dev) {
3518 struct neigh_parms *dst_p =
3519 neigh_get_dev_parms_rcu(dev, family);
3520
3521 if (dst_p && !test_bit(index, dst_p->data_state))
3522 dst_p->data[index] = p->data[index];
3523 }
3524 rcu_read_unlock();
3525}
3526
3527static void neigh_proc_update(struct ctl_table *ctl, int write)
3528{
3529 struct net_device *dev = ctl->extra1;
3530 struct neigh_parms *p = ctl->extra2;
Jiri Pirko77d47af2013-12-10 23:55:07 +01003531 struct net *net = neigh_parms_net(p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003532 int index = (int *) ctl->data - p->data;
3533
3534 if (!write)
3535 return;
3536
3537 set_bit(index, p->data_state);
Marcus Huewe7627ae62017-02-15 01:00:36 +01003538 if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3539 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003540 if (!dev) /* NULL dev means this is default value */
3541 neigh_copy_dflt_parms(net, p, index);
3542}
3543
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003544static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003545 void *buffer, size_t *lenp,
3546 loff_t *ppos)
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003547{
3548 struct ctl_table tmp = *ctl;
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003549 int ret;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003550
Matteo Croceeec48442019-07-18 15:58:50 -07003551 tmp.extra1 = SYSCTL_ZERO;
3552 tmp.extra2 = SYSCTL_INT_MAX;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003553
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003554 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3555 neigh_proc_update(ctl, write);
3556 return ret;
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003557}
3558
Christoph Hellwig32927392020-04-24 08:43:38 +02003559int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3560 size_t *lenp, loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003561{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003562 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3563
3564 neigh_proc_update(ctl, write);
3565 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003566}
3567EXPORT_SYMBOL(neigh_proc_dointvec);
3568
Christoph Hellwig32927392020-04-24 08:43:38 +02003569int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003570 size_t *lenp, loff_t *ppos)
3571{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003572 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3573
3574 neigh_proc_update(ctl, write);
3575 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003576}
3577EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3578
3579static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003580 void *buffer, size_t *lenp,
3581 loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003582{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003583 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3584
3585 neigh_proc_update(ctl, write);
3586 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003587}
3588
3589int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003590 void *buffer, size_t *lenp, loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003591{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003592 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3593
3594 neigh_proc_update(ctl, write);
3595 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003596}
3597EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3598
3599static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003600 void *buffer, size_t *lenp,
3601 loff_t *ppos)
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003602{
Jiri Pirko1d4c8c22013-12-07 19:26:56 +01003603 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3604
3605 neigh_proc_update(ctl, write);
3606 return ret;
Jiri Pirkocb5b09c2013-12-07 19:26:54 +01003607}
3608
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003609static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02003610 void *buffer, size_t *lenp,
3611 loff_t *ppos)
Jean-Francois Remy4bf69802015-01-14 04:22:39 +01003612{
3613 struct neigh_parms *p = ctl->extra2;
3614 int ret;
3615
3616 if (strcmp(ctl->procname, "base_reachable_time") == 0)
3617 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3618 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3619 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3620 else
3621 ret = -1;
3622
3623 if (write && ret == 0) {
3624		/* update reachable_time as well; otherwise the change will
3625		 * only take effect the next time neigh_periodic_work
3626		 * decides to recompute it
3627		 */
3628 p->reachable_time =
3629 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3630 }
3631 return ret;
3632}
3633
Jiri Pirko1f9248e2013-12-07 19:26:53 +01003634#define NEIGH_PARMS_DATA_OFFSET(index) \
3635 (&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
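
/*
 * For illustration, NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time")
 * expands to roughly:
 *
 *	[NEIGH_VAR_GC_STALETIME] = {
 *		.procname	= "gc_stale_time",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_GC_STALETIME),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_jiffies,
 *	}
 *
 * The "_REUSED_" variants expose a second sysctl name (e.g. "unres_qlen")
 * whose .data points at another entry's variable ("unres_qlen_bytes"), so
 * both names read and write the same value, just in different units.
 */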

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};
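
/*
 * Each registered copy of this template ends up under
 * /proc/sys/net/<ipv4|ipv6>/neigh/<ifname|"default">/, e.g. (paths shown
 * for illustration, assuming an interface named eth0):
 *
 *	/proc/sys/net/ipv4/neigh/default/gc_thresh3
 *	/proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * The gc_interval/gc_thresh* entries belong to the neigh_table rather than
 * to a device, so neigh_sysctl_register() truncates the table at
 * NEIGH_VAR_GC_INTERVAL for per-device registrations and they appear only
 * in the "default" directory.
 */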

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers update p->reachable_time after
		 * base_reachable_time(_ms) is set, so the new timer takes
		 * effect after the next neighbour update instead of waiting
		 * for neigh_periodic_work to recompute it (which can take
		 * multiple minutes).  Any handler that replaces them should
		 * do the same.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);
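
/*
 * Usage sketch (not from this file; the caller's table name is hypothetical):
 * a protocol that keeps a struct neigh_parms per device would typically pair
 * these calls with neigh_parms_alloc()/neigh_parms_release() in its netdev
 * setup and teardown paths, e.g.:
 *
 *	parms = neigh_parms_alloc(dev, &my_tbl);
 *	if (!parms)
 *		return -ENOMEM;
 *	if (neigh_sysctl_register(dev, parms, NULL))
 *		goto out_release;
 *	...
 *	neigh_sysctl_unregister(parms);
 *	neigh_parms_release(&my_tbl, parms);
 *
 * Passing a non-NULL handler replaces the proc handlers for retrans_time,
 * base_reachable_time and their _ms variants, which a caller can use to hook
 * its own change notification.
 */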

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);
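
/*
 * For reference, the rtnetlink wiring above is what the usual userspace
 * tooling ends up exercising; assuming iproute2 and an interface eth0,
 * roughly:
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0
 *		-> RTM_NEWNEIGH  -> neigh_add()
 *	ip neigh del 192.0.2.1 dev eth0
 *		-> RTM_DELNEIGH  -> neigh_delete()
 *	ip neigh show
 *		-> RTM_GETNEIGH  -> neigh_dump_info() (neigh_get() serves
 *		   single-entry lookups)
 *	ip ntable show / ip ntable change
 *		-> RTM_GETNEIGHTBL / RTM_SETNEIGHTBL
 */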