blob: 17927966abb337f4bfda65370f0b437638382e80 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
David Ahern1b69c6d2015-09-29 20:07:11 -07002/*
3 * net/l3mdev/l3mdev.c - L3 master device implementation
4 * Copyright (c) 2015 Cumulus Networks
5 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
David Ahern1b69c6d2015-09-29 20:07:11 -07006 */
7
8#include <linux/netdevice.h>
David Ahern96c63fa2016-06-08 10:55:39 -07009#include <net/fib_rules.h>
David Ahern1b69c6d2015-09-29 20:07:11 -070010#include <net/l3mdev.h>
11
/* Protects registration and use of the per-type dev_lookup handlers below */
static DEFINE_SPINLOCK(l3mdev_lock);

/* Per l3mdev type hook: maps a FIB table id to the owning device's ifindex */
struct l3mdev_handler {
	lookup_by_table_id_t dev_lookup;
};

/* One handler slot per enum l3mdev_type value; guarded by l3mdev_lock */
static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];
19
20static int l3mdev_check_type(enum l3mdev_type l3type)
21{
22 if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
23 return -EINVAL;
24
25 return 0;
26}
27
28int l3mdev_table_lookup_register(enum l3mdev_type l3type,
29 lookup_by_table_id_t fn)
30{
31 struct l3mdev_handler *hdlr;
32 int res;
33
34 res = l3mdev_check_type(l3type);
35 if (res)
36 return res;
37
38 hdlr = &l3mdev_handlers[l3type];
39
40 spin_lock(&l3mdev_lock);
41
42 if (hdlr->dev_lookup) {
43 res = -EBUSY;
44 goto unlock;
45 }
46
47 hdlr->dev_lookup = fn;
48 res = 0;
49
50unlock:
51 spin_unlock(&l3mdev_lock);
52
53 return res;
54}
55EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);
56
57void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
58 lookup_by_table_id_t fn)
59{
60 struct l3mdev_handler *hdlr;
61
62 if (l3mdev_check_type(l3type))
63 return;
64
65 hdlr = &l3mdev_handlers[l3type];
66
67 spin_lock(&l3mdev_lock);
68
69 if (hdlr->dev_lookup == fn)
70 hdlr->dev_lookup = NULL;
71
72 spin_unlock(&l3mdev_lock);
73}
74EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);
75
76int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
77 struct net *net, u32 table_id)
78{
79 lookup_by_table_id_t lookup;
80 struct l3mdev_handler *hdlr;
81 int ifindex = -EINVAL;
82 int res;
83
84 res = l3mdev_check_type(l3type);
85 if (res)
86 return res;
87
88 hdlr = &l3mdev_handlers[l3type];
89
90 spin_lock(&l3mdev_lock);
91
92 lookup = hdlr->dev_lookup;
93 if (!lookup)
94 goto unlock;
95
96 ifindex = lookup(net, table_id);
97
98unlock:
99 spin_unlock(&l3mdev_lock);
100
101 return ifindex;
102}
103EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);
104
David Ahern1b69c6d2015-09-29 20:07:11 -0700105/**
Xiongfeng Wang37569282021-03-27 16:15:48 +0800106 * l3mdev_master_ifindex_rcu - get index of L3 master device
David Ahern1b69c6d2015-09-29 20:07:11 -0700107 * @dev: targeted interface
108 */
109
David Ahern3f2fb9a2016-02-24 11:47:02 -0800110int l3mdev_master_ifindex_rcu(const struct net_device *dev)
David Ahern1b69c6d2015-09-29 20:07:11 -0700111{
112 int ifindex = 0;
113
114 if (!dev)
115 return 0;
116
117 if (netif_is_l3_master(dev)) {
118 ifindex = dev->ifindex;
David Ahernfee6d4c2015-10-05 08:51:24 -0700119 } else if (netif_is_l3_slave(dev)) {
David Ahern1b69c6d2015-09-29 20:07:11 -0700120 struct net_device *master;
David Ahern3f2fb9a2016-02-24 11:47:02 -0800121 struct net_device *_dev = (struct net_device *)dev;
David Ahern1b69c6d2015-09-29 20:07:11 -0700122
David Ahern3f2fb9a2016-02-24 11:47:02 -0800123 /* netdev_master_upper_dev_get_rcu calls
124 * list_first_or_null_rcu to walk the upper dev list.
125 * list_first_or_null_rcu does not handle a const arg. We aren't
126 * making changes, just want the master device from that list so
127 * typecast to remove the const
128 */
129 master = netdev_master_upper_dev_get_rcu(_dev);
David Ahernfee6d4c2015-10-05 08:51:24 -0700130 if (master)
David Ahern1b69c6d2015-09-29 20:07:11 -0700131 ifindex = master->ifindex;
132 }
133
134 return ifindex;
135}
136EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
137
138/**
Xiongfeng Wang37569282021-03-27 16:15:48 +0800139 * l3mdev_master_upper_ifindex_by_index_rcu - get index of upper l3 master
Alexis Bauvin6a6d6682018-12-03 10:54:39 +0100140 * device
141 * @net: network namespace for device index lookup
142 * @ifindex: targeted interface
143 */
144int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
145{
146 struct net_device *dev;
147
148 dev = dev_get_by_index_rcu(net, ifindex);
149 while (dev && !netif_is_l3_master(dev))
150 dev = netdev_master_upper_dev_get(dev);
151
152 return dev ? dev->ifindex : 0;
153}
154EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
155
156/**
Miaohe Lin645f0892020-08-27 07:27:49 -0400157 * l3mdev_fib_table_rcu - get FIB table id associated with an L3
David Ahern1b69c6d2015-09-29 20:07:11 -0700158 * master interface
159 * @dev: targeted interface
160 */
161
162u32 l3mdev_fib_table_rcu(const struct net_device *dev)
163{
164 u32 tb_id = 0;
165
166 if (!dev)
167 return 0;
168
169 if (netif_is_l3_master(dev)) {
170 if (dev->l3mdev_ops->l3mdev_fib_table)
171 tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
David Ahernfee6d4c2015-10-05 08:51:24 -0700172 } else if (netif_is_l3_slave(dev)) {
David Ahern1b69c6d2015-09-29 20:07:11 -0700173 /* Users of netdev_master_upper_dev_get_rcu need non-const,
174 * but current inet_*type functions take a const
175 */
176 struct net_device *_dev = (struct net_device *) dev;
177 const struct net_device *master;
178
179 master = netdev_master_upper_dev_get_rcu(_dev);
David Ahernfee6d4c2015-10-05 08:51:24 -0700180 if (master &&
David Ahern1b69c6d2015-09-29 20:07:11 -0700181 master->l3mdev_ops->l3mdev_fib_table)
182 tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
183 }
184
185 return tb_id;
186}
187EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);
188
189u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
190{
191 struct net_device *dev;
192 u32 tb_id = 0;
193
194 if (!ifindex)
195 return 0;
196
197 rcu_read_lock();
198
199 dev = dev_get_by_index_rcu(net, ifindex);
200 if (dev)
201 tb_id = l3mdev_fib_table_rcu(dev);
202
203 rcu_read_unlock();
204
205 return tb_id;
206}
207EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
David Ahern4a658962016-05-07 16:48:59 -0700208
209/**
David Ahern4c1feac2016-09-10 12:09:56 -0700210 * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
211 * local and multicast addresses
David Ahern4a658962016-05-07 16:48:59 -0700212 * @net: network namespace for device index lookup
213 * @fl6: IPv6 flow struct for lookup
Wei Wang7d9e5f42019-06-20 17:36:41 -0700214 * This function does not hold refcnt on the returned dst.
215 * Caller must hold rcu_read_lock().
David Ahern4a658962016-05-07 16:48:59 -0700216 */
217
David Ahern4c1feac2016-09-10 12:09:56 -0700218struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
219 struct flowi6 *fl6)
David Ahern4a658962016-05-07 16:48:59 -0700220{
221 struct dst_entry *dst = NULL;
222 struct net_device *dev;
223
Wei Wang7d9e5f42019-06-20 17:36:41 -0700224 WARN_ON_ONCE(!rcu_read_lock_held());
David Ahern1ff23be2016-05-07 16:49:00 -0700225 if (fl6->flowi6_oif) {
David Ahern1ff23be2016-05-07 16:49:00 -0700226 dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
227 if (dev && netif_is_l3_slave(dev))
228 dev = netdev_master_upper_dev_get_rcu(dev);
229
230 if (dev && netif_is_l3_master(dev) &&
David Ahern4c1feac2016-09-10 12:09:56 -0700231 dev->l3mdev_ops->l3mdev_link_scope_lookup)
232 dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
David Ahern4a658962016-05-07 16:48:59 -0700233 }
234
235 return dst;
236}
David Ahern4c1feac2016-09-10 12:09:56 -0700237EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);
David Ahern4a658962016-05-07 16:48:59 -0700238
David Ahern96c63fa2016-06-08 10:55:39 -0700239/**
240 * l3mdev_fib_rule_match - Determine if flowi references an
241 * L3 master device
242 * @net: network namespace for device index lookup
243 * @fl: flow struct
Andrew Lunn9d637f82020-10-28 01:50:59 +0100244 * @arg: store the table the rule matched with here
David Ahern96c63fa2016-06-08 10:55:39 -0700245 */
246
247int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
248 struct fib_lookup_arg *arg)
249{
250 struct net_device *dev;
251 int rc = 0;
252
253 rcu_read_lock();
254
255 dev = dev_get_by_index_rcu(net, fl->flowi_oif);
256 if (dev && netif_is_l3_master(dev) &&
257 dev->l3mdev_ops->l3mdev_fib_table) {
258 arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
259 rc = 1;
260 goto out;
261 }
262
263 dev = dev_get_by_index_rcu(net, fl->flowi_iif);
264 if (dev && netif_is_l3_master(dev) &&
265 dev->l3mdev_ops->l3mdev_fib_table) {
266 arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
267 rc = 1;
268 goto out;
269 }
270
271out:
272 rcu_read_unlock();
273
274 return rc;
275}
David Ahern9ee00342016-09-10 12:09:52 -0700276
277void l3mdev_update_flow(struct net *net, struct flowi *fl)
278{
279 struct net_device *dev;
280 int ifindex;
281
282 rcu_read_lock();
283
284 if (fl->flowi_oif) {
285 dev = dev_get_by_index_rcu(net, fl->flowi_oif);
286 if (dev) {
287 ifindex = l3mdev_master_ifindex_rcu(dev);
288 if (ifindex) {
289 fl->flowi_oif = ifindex;
290 fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
291 goto out;
292 }
293 }
294 }
295
296 if (fl->flowi_iif) {
297 dev = dev_get_by_index_rcu(net, fl->flowi_iif);
298 if (dev) {
299 ifindex = l3mdev_master_ifindex_rcu(dev);
300 if (ifindex) {
301 fl->flowi_iif = ifindex;
302 fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
303 }
304 }
305 }
306
307out:
308 rcu_read_unlock();
309}
310EXPORT_SYMBOL_GPL(l3mdev_update_flow);