blob: cc9891672eaa58fbe36ce81fbc097862014d7af8 [file] [log] [blame]
Jeremy Kerr889b7da2021-07-29 10:20:45 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Management Component Transport Protocol (MCTP) - routing
4 * implementation.
5 *
6 * This is currently based on a simple routing table, with no dst cache. The
7 * number of routes should stay fairly small, so the lookup cost is small.
8 *
9 * Copyright (c) 2021 Code Construct
10 * Copyright (c) 2021 Google
11 */
12
13#include <linux/idr.h>
14#include <linux/mctp.h>
15#include <linux/netdevice.h>
16#include <linux/rtnetlink.h>
17#include <linux/skbuff.h>
18
19#include <uapi/linux/if_arp.h>
20
21#include <net/mctp.h>
22#include <net/mctpdevice.h>
Matt Johnston06d2f4c2021-07-29 10:20:46 +080023#include <net/netlink.h>
24#include <net/sock.h>
Jeremy Kerr889b7da2021-07-29 10:20:45 +080025
/* route output callbacks */

/* Default output handler: sink and free the skb. Installed on
 * freshly-allocated routes before a real output function is assigned.
 */
static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
32
/* Find a socket bound to the (net, type, dest-EID) tuple of an incoming skb.
 *
 * Walks the per-netns bind list; caller must hold the RCU read lock.
 * A socket bound with MCTP_NET_ANY / MCTP_ADDR_ANY matches any net /
 * any local EID respectively.
 *
 * Returns the matching mctp_sock, or NULL if nothing is bound.
 */
static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
{
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *mh;
	struct sock *sk;
	u8 type;

	WARN_ON(!rcu_read_lock_held());

	/* TODO: look up in skb->cb? */
	mh = mctp_hdr(skb);

	/* the message-type byte must be present in the linear area */
	if (!skb_headlen(skb))
		return NULL;

	/* MCTP message type: first payload byte, IC bit masked off */
	type = (*(u8 *)skb->data) & 0x7f;

	sk_for_each_rcu(sk, &net->mctp.binds) {
		struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);

		if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
			continue;

		if (msk->bind_type != type)
			continue;

		if (msk->bind_addr != MCTP_ADDR_ANY &&
		    msk->bind_addr != mh->dest)
			continue;

		return msk;
	}

	return NULL;
}
68
69static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
70 mctp_eid_t peer, u8 tag)
71{
72 if (key->local_addr != local)
73 return false;
74
75 if (key->peer_addr != peer)
76 return false;
77
78 if (key->tag != tag)
79 return false;
80
81 return true;
82}
83
84static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
85 mctp_eid_t peer)
86{
87 struct mctp_sk_key *key, *ret;
88 struct mctp_hdr *mh;
89 u8 tag;
90
91 WARN_ON(!rcu_read_lock_held());
92
93 mh = mctp_hdr(skb);
94 tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
95
96 ret = NULL;
97
98 hlist_for_each_entry_rcu(key, &net->mctp.keys, hlist) {
99 if (mctp_key_match(key, mh->dest, peer, tag)) {
100 ret = key;
101 break;
102 }
103 }
104
105 return ret;
106}
107
Jeremy Kerr889b7da2021-07-29 10:20:45 +0800108static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
109{
Jeremy Kerr833ef3b2021-07-29 10:20:49 +0800110 struct net *net = dev_net(skb->dev);
111 struct mctp_sk_key *key;
112 struct mctp_sock *msk;
113 struct mctp_hdr *mh;
114
115 msk = NULL;
116
117 /* we may be receiving a locally-routed packet; drop source sk
118 * accounting
119 */
120 skb_orphan(skb);
121
122 /* ensure we have enough data for a header and a type */
123 if (skb->len < sizeof(struct mctp_hdr) + 1)
124 goto drop;
125
126 /* grab header, advance data ptr */
127 mh = mctp_hdr(skb);
128 skb_pull(skb, sizeof(struct mctp_hdr));
129
130 if (mh->ver != 1)
131 goto drop;
132
133 /* TODO: reassembly */
134 if ((mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM))
135 != (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM))
136 goto drop;
137
138 rcu_read_lock();
139 /* 1. lookup socket matching (src,dest,tag) */
140 key = mctp_lookup_key(net, skb, mh->src);
141
142 /* 2. lookup socket macthing (BCAST,dest,tag) */
143 if (!key)
144 key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY);
145
146 /* 3. SOM? -> lookup bound socket, conditionally (!EOM) create
147 * mapping for future (1)/(2).
148 */
149 if (key)
150 msk = container_of(key->sk, struct mctp_sock, sk);
151 else if (!msk && (mh->flags_seq_tag & MCTP_HDR_FLAG_SOM))
152 msk = mctp_lookup_bind(net, skb);
153
154 if (!msk)
155 goto unlock_drop;
156
157 sock_queue_rcv_skb(&msk->sk, skb);
158
159 rcu_read_unlock();
160
161 return 0;
162
163unlock_drop:
164 rcu_read_unlock();
165drop:
Jeremy Kerr889b7da2021-07-29 10:20:45 +0800166 kfree_skb(skb);
167 return 0;
168}
169
/* Transmit an skb out the route's device.
 *
 * Enforces the device MTU, builds the link-layer header, and hands the
 * packet to the qdisc layer. Consumes the skb on every path. Returns 0
 * on success, -EMSGSIZE if over-MTU, -EHOSTUNREACH if header
 * construction fails, or a net_xmit errno from the device queue.
 */
static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
{
	unsigned int mtu;
	int rc;

	skb->protocol = htons(ETH_P_MCTP);

	mtu = READ_ONCE(skb->dev->mtu);
	if (skb->len > mtu) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* TODO: daddr (from rt->neigh), saddr (from device?) */
	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
			     NULL, NULL, skb->len);
	if (rc) {
		kfree_skb(skb);
		return -EHOSTUNREACH;
	}

	rc = dev_queue_xmit(skb);
	if (rc)
		rc = net_xmit_errno(rc);

	return rc;
}
197
/* route alloc/release */

/* Drop a reference on rt. On the final put, release the held device
 * reference and free the route after an RCU grace period (lookups walk
 * the route list under RCU).
 */
static void mctp_route_release(struct mctp_route *rt)
{
	if (refcount_dec_and_test(&rt->refs)) {
		dev_put(rt->dev->dev);
		kfree_rcu(rt, rcu);
	}
}
206
207/* returns a route with the refcount at 1 */
208static struct mctp_route *mctp_route_alloc(void)
209{
210 struct mctp_route *rt;
211
212 rt = kzalloc(sizeof(*rt), GFP_KERNEL);
213 if (!rt)
214 return NULL;
215
216 INIT_LIST_HEAD(&rt->list);
217 refcount_set(&rt->refs, 1);
218 rt->output = mctp_route_discard;
219
220 return rt;
221}
222
/* tag management */

/* Attach an allocated key to the socket and publish it on the netns key
 * list. Caller must hold mns->keys_lock.
 */
static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
			     struct mctp_sock *msk)
{
	struct netns_mctp *mns = &net->mctp;

	lockdep_assert_held(&mns->keys_lock);

	key->sk = &msk->sk;

	/* we hold the net's keys_lock here, allowing updates to both
	 * the net and sk lists
	 */
	hlist_add_head_rcu(&key->hlist, &mns->keys);
	hlist_add_head_rcu(&key->sklist, &msk->keys);
}
239
/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
 * it for the socket msk.
 *
 * Returns 0 and stores the tag in *tagp, -ENOMEM on allocation failure,
 * or -EAGAIN when all eight tag values for this (saddr, daddr) pair are
 * currently in use.
 */
static int mctp_alloc_local_tag(struct mctp_sock *msk,
				mctp_eid_t saddr, mctp_eid_t daddr, u8 *tagp)
{
	struct net *net = sock_net(&msk->sk);
	struct netns_mctp *mns = &net->mctp;
	struct mctp_sk_key *key, *tmp;
	unsigned long flags;
	int rc = -EAGAIN;
	u8 tagbits;

	/* be optimistic, alloc now (outside the spinlock, as GFP_KERNEL
	 * may sleep)
	 */
	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key)
		return -ENOMEM;
	key->local_addr = saddr;
	key->peer_addr = daddr;

	/* 8 possible tag values */
	tagbits = 0xff;

	spin_lock_irqsave(&mns->keys_lock, flags);

	/* Walk through the existing keys, looking for potential conflicting
	 * tags. If we find a conflict, clear that bit from tagbits
	 */
	hlist_for_each_entry(tmp, &mns->keys, hlist) {
		/* if we don't own the tag, it can't conflict */
		if (tmp->tag & MCTP_HDR_FLAG_TO)
			continue;

		/* a key for the same local EID with a matching (or
		 * wildcard) peer holds its tag value busy
		 */
		if ((tmp->peer_addr == daddr ||
		     tmp->peer_addr == MCTP_ADDR_ANY) &&
		    tmp->local_addr == saddr)
			tagbits &= ~(1 << tmp->tag);

		/* all tags taken; no point scanning further */
		if (!tagbits)
			break;
	}

	if (tagbits) {
		/* lowest free tag value wins */
		key->tag = __ffs(tagbits);
		mctp_reserve_tag(net, key, msk);
		*tagp = key->tag;
		rc = 0;
	}

	spin_unlock_irqrestore(&mns->keys_lock, flags);

	/* no free tag: discard the speculative allocation */
	if (!tagbits)
		kfree(key);

	return rc;
}
296
Jeremy Kerr889b7da2021-07-29 10:20:45 +0800297/* routing lookups */
298static bool mctp_rt_match_eid(struct mctp_route *rt,
299 unsigned int net, mctp_eid_t eid)
300{
301 return READ_ONCE(rt->dev->net) == net &&
302 rt->min <= eid && rt->max >= eid;
303}
304
305/* compares match, used for duplicate prevention */
306static bool mctp_rt_compare_exact(struct mctp_route *rt1,
307 struct mctp_route *rt2)
308{
309 ASSERT_RTNL();
310 return rt1->dev->net == rt2->dev->net &&
311 rt1->min == rt2->min &&
312 rt1->max == rt2->max;
313}
314
/* Find a route for (dnet, daddr) and take a reference on it.
 *
 * Runs under RCU; a route whose refcount has already dropped to zero is
 * skipped, as it is about to be freed. Returns NULL when no route
 * matches. The caller releases the reference via mctp_do_route() or
 * mctp_route_release().
 */
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
				     mctp_eid_t daddr)
{
	struct mctp_route *tmp, *rt = NULL;

	list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
		/* TODO: add metrics */
		if (mctp_rt_match_eid(tmp, dnet, daddr)) {
			if (refcount_inc_not_zero(&tmp->refs)) {
				rt = tmp;
				break;
			}
		}
	}

	return rt;
}
332
/* sends a skb to rt and releases the route. */
int mctp_do_route(struct mctp_route *rt, struct sk_buff *skb)
{
	int rc;

	/* the output handler consumes the skb on every path */
	rc = rt->output(rt, skb);
	mctp_route_release(rt);
	return rc;
}
342
/* Transmit a locally-generated message over the given route.
 *
 * Prepends the MCTP header, using the outbound device's first local EID
 * as the source. If req_tag has the TO (tag-owner) bit set, a fresh
 * locally-owned tag is allocated and reserved against the socket;
 * otherwise req_tag is used verbatim (a response echoing the peer's tag).
 *
 * On success the skb and route reference are consumed by
 * mctp_do_route(). NOTE(review): on error paths the skb and route
 * reference are NOT released here — presumably the caller handles both;
 * confirm against the socket sendmsg path.
 */
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
		      struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb = mctp_cb(skb);
	struct mctp_hdr *hdr;
	unsigned long flags;
	mctp_eid_t saddr;
	int rc;
	u8 tag;

	if (WARN_ON(!rt->dev))
		return -EINVAL;

	spin_lock_irqsave(&rt->dev->addrs_lock, flags);
	if (rt->dev->num_addrs == 0) {
		rc = -EHOSTUNREACH;
	} else {
		/* use the outbound interface's first address as our source */
		saddr = rt->dev->addrs[0];
		rc = 0;
	}
	spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);

	if (rc)
		return rc;

	if (req_tag & MCTP_HDR_FLAG_TO) {
		/* we own this exchange: allocate a new tag */
		rc = mctp_alloc_local_tag(msk, saddr, daddr, &tag);
		if (rc)
			return rc;
		tag |= MCTP_HDR_FLAG_TO;
	} else {
		tag = req_tag;
	}

	/* TODO: we have the route MTU here; packetise */

	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct mctp_hdr));
	skb_reset_network_header(skb);
	hdr = mctp_hdr(skb);
	hdr->ver = 1;
	hdr->dest = daddr;
	hdr->src = saddr;
	hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM | /* TODO */
		tag;

	skb->dev = rt->dev->dev;
	skb->protocol = htons(ETH_P_MCTP);
	skb->priority = 0;

	/* cb->net will have been set on initial ingress */
	cb->src = saddr;

	return mctp_do_route(rt, skb);
}
400
/* route management */

/* Add a route covering EIDs [daddr_start, daddr_start + daddr_extent]
 * on mdev. is_local selects local delivery (mctp_route_input) versus
 * transmit (mctp_route_output). Takes a device reference for the
 * route's lifetime. Caller must hold RTNL.
 *
 * Returns -EINVAL for a bad EID range (the range must stay below the
 * 0xff broadcast EID), -EEXIST for an exact duplicate route.
 */
static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			  unsigned int daddr_extent, unsigned int mtu,
			  bool is_local)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *ert;

	if (!mctp_address_ok(daddr_start))
		return -EINVAL;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	rt = mctp_route_alloc();
	if (!rt)
		return -ENOMEM;

	rt->min = daddr_start;
	rt->max = daddr_start + daddr_extent;
	rt->mtu = mtu;
	rt->dev = mdev;
	dev_hold(rt->dev->dev);
	rt->output = is_local ? mctp_route_input : mctp_route_output;

	ASSERT_RTNL();
	/* Prevent duplicate identical routes. */
	list_for_each_entry(ert, &net->mctp.routes, list) {
		if (mctp_rt_compare_exact(rt, ert)) {
			mctp_route_release(rt);
			return -EEXIST;
		}
	}

	list_add_rcu(&rt->list, &net->mctp.routes);

	return 0;
}
439
/* Remove all routes on mdev exactly matching the
 * [daddr_start, daddr_start + daddr_extent] range. Caller must hold
 * RTNL. Returns -ENOENT if nothing matched.
 */
static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
			     unsigned int daddr_extent)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;
	mctp_eid_t daddr_end;
	bool dropped;

	if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
		return -EINVAL;

	daddr_end = daddr_start + daddr_extent;
	dropped = false;

	ASSERT_RTNL();

	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev &&
		    rt->min == daddr_start && rt->max == daddr_end) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
			dropped = true;
		}
	}

	return dropped ? 0 : -ENOENT;
}
468
/* Add a local (receive) route for a single EID on mdev */
int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_add(mdev, addr, 0, 0, true);
}
473
/* Remove the single-EID local route for addr on mdev */
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
	return mctp_route_remove(mdev, addr, 0);
}
478
/* removes all entries for a given device */
void mctp_route_remove_dev(struct mctp_dev *mdev)
{
	struct net *net = dev_net(mdev->dev);
	struct mctp_route *rt, *tmp;

	/* RTNL serialises against route add/remove; concurrent RCU readers
	 * may still hold references, dropped via mctp_route_release()
	 */
	ASSERT_RTNL();
	list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
		if (rt->dev == mdev) {
			list_del_rcu(&rt->list);
			/* TODO: immediate RTM_DELROUTE */
			mctp_route_release(rt);
		}
	}
}
494
495/* Incoming packet-handling */
496
497static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
498 struct packet_type *pt,
499 struct net_device *orig_dev)
500{
501 struct net *net = dev_net(dev);
502 struct mctp_skb_cb *cb;
503 struct mctp_route *rt;
504 struct mctp_hdr *mh;
505
506 /* basic non-data sanity checks */
507 if (dev->type != ARPHRD_MCTP)
508 goto err_drop;
509
510 if (!pskb_may_pull(skb, sizeof(struct mctp_hdr)))
511 goto err_drop;
512
513 skb_reset_transport_header(skb);
514 skb_reset_network_header(skb);
515
516 /* We have enough for a header; decode and route */
517 mh = mctp_hdr(skb);
518 if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
519 goto err_drop;
520
521 cb = __mctp_cb(skb);
522 rcu_read_lock();
523 cb->net = READ_ONCE(__mctp_dev_get(dev)->net);
524 rcu_read_unlock();
525
526 rt = mctp_route_lookup(net, cb->net, mh->dest);
527 if (!rt)
528 goto err_drop;
529
530 mctp_do_route(rt, skb);
531
532 return NET_RX_SUCCESS;
533
534err_drop:
535 kfree_skb(skb);
536 return NET_RX_DROP;
537}
538
/* registered for all ETH_P_MCTP packets, on any device */
static struct packet_type mctp_packet_type = {
	.type = cpu_to_be16(ETH_P_MCTP),
	.func = mctp_pkttype_receive,
};
543
/* netlink interface */

/* attribute policy for RTM_NEWROUTE / RTM_DELROUTE requests */
static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
	[RTA_DST] = { .type = NLA_U8 },
	[RTA_METRICS] = { .type = NLA_NESTED },
	[RTA_OIF] = { .type = NLA_U32 },
};
551
/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
 * tb must hold RTA_MAX+1 elements.
 *
 * On success, fills *rtm, *mdev and *daddr_start from the request.
 * The returned *mdev is looked up via mctp_dev_get_rtnl(), so it is
 * only valid while the caller holds RTNL.
 */
static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack,
			      struct nlattr **tb, struct rtmsg **rtm,
			      struct mctp_dev **mdev, mctp_eid_t *daddr_start)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	unsigned int ifindex;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			 rta_mctp_policy, extack);
	if (rc < 0) {
		NL_SET_ERR_MSG(extack, "incorrect format");
		return rc;
	}

	if (!tb[RTA_DST]) {
		NL_SET_ERR_MSG(extack, "dst EID missing");
		return -EINVAL;
	}
	*daddr_start = nla_get_u8(tb[RTA_DST]);

	if (!tb[RTA_OIF]) {
		NL_SET_ERR_MSG(extack, "ifindex missing");
		return -EINVAL;
	}
	ifindex = nla_get_u32(tb[RTA_OIF]);

	*rtm = nlmsg_data(nlh);
	if ((*rtm)->rtm_family != AF_MCTP) {
		NL_SET_ERR_MSG(extack, "route family must be AF_MCTP");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "bad ifindex");
		return -ENODEV;
	}
	*mdev = mctp_dev_get_rtnl(dev);
	if (!*mdev)
		return -ENODEV;

	/* loopback delivery is handled by local routes instead */
	if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "no routes to loopback");
		return -EINVAL;
	}

	return 0;
}
606
/* RTM_NEWROUTE handler: add a unicast route from a netlink request.
 * Note: rtm_dst_len carries an EID range extent here, not a prefix
 * length (see mctp_fill_rtinfo()).
 */
static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	unsigned int mtu;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	if (rtm->rtm_type != RTN_UNICAST) {
		NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
		return -EINVAL;
	}

	/* TODO: parse mtu from nlparse */
	mtu = 0;

	rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu, false);
	return rc;
}
633
/* RTM_DELROUTE handler: remove the route exactly matching the request's
 * EID range and interface.
 */
static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RTA_MAX + 1];
	mctp_eid_t daddr_start;
	struct mctp_dev *mdev;
	struct rtmsg *rtm;
	int rc;

	rc = mctp_route_nlparse(skb, nlh, extack, tb,
				&rtm, &mdev, &daddr_start);
	if (rc < 0)
		return rc;

	/* we only have unicast routes */
	if (rtm->rtm_type != RTN_UNICAST)
		return -EINVAL;

	rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
	return rc;
}
655
656static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
657 u32 portid, u32 seq, int event, unsigned int flags)
658{
659 struct nlmsghdr *nlh;
660 struct rtmsg *hdr;
661 void *metrics;
662
663 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
664 if (!nlh)
665 return -EMSGSIZE;
666
667 hdr = nlmsg_data(nlh);
668 hdr->rtm_family = AF_MCTP;
669
670 /* we use the _len fields as a number of EIDs, rather than
671 * a number of bits in the address
672 */
673 hdr->rtm_dst_len = rt->max - rt->min;
674 hdr->rtm_src_len = 0;
675 hdr->rtm_tos = 0;
676 hdr->rtm_table = RT_TABLE_DEFAULT;
677 hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
678 hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
679 hdr->rtm_type = RTN_ANYCAST; /* TODO: type from route */
680
681 if (nla_put_u8(skb, RTA_DST, rt->min))
682 goto cancel;
683
684 metrics = nla_nest_start_noflag(skb, RTA_METRICS);
685 if (!metrics)
686 goto cancel;
687
688 if (rt->mtu) {
689 if (nla_put_u32(skb, RTAX_MTU, rt->mtu))
690 goto cancel;
691 }
692
693 nla_nest_end(skb, metrics);
694
695 if (rt->dev) {
696 if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
697 goto cancel;
698 }
699
700 /* TODO: conditional neighbour physaddr? */
701
702 nlmsg_end(skb, nlh);
703
704 return 0;
705
706cancel:
707 nlmsg_cancel(skb, nlh);
708 return -EMSGSIZE;
709}
710
/* RTM_GETROUTE dump handler: emit one RTM_NEWROUTE message per route.
 * cb->args[0] carries the resume index across dump invocations.
 */
static int mctp_dump_rtinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mctp_route *rt;
	int s_idx, idx;

	/* TODO: allow filtering on route data, possibly under
	 * cb->strict_check
	 */

	/* TODO: change to struct overlay */
	s_idx = cb->args[0];
	idx = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
		if (idx++ < s_idx)
			continue;
		/* skb full: stop here, resume from idx on the next call */
		if (mctp_fill_rtinfo(skb, rt,
				     NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq,
				     RTM_NEWROUTE, NLM_F_MULTI) < 0)
			break;
	}

	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
741
/* net namespace implementation */

/* Initialise the per-namespace routing state: empty route table, bind
 * list, and tag/key table with their locks.
 */
static int __net_init mctp_routes_net_init(struct net *net)
{
	struct netns_mctp *ns = &net->mctp;

	INIT_LIST_HEAD(&ns->routes);
	INIT_HLIST_HEAD(&ns->binds);
	mutex_init(&ns->bind_lock);
	INIT_HLIST_HEAD(&ns->keys);
	spin_lock_init(&ns->keys_lock);
	return 0;
}
754
755static void __net_exit mctp_routes_net_exit(struct net *net)
756{
757 struct mctp_route *rt;
758
759 list_for_each_entry_rcu(rt, &net->mctp.routes, list)
760 mctp_route_release(rt);
761}
762
/* per-netns lifecycle hooks for MCTP routing state */
static struct pernet_operations mctp_net_ops = {
	.init = mctp_routes_net_init,
	.exit = mctp_routes_net_exit,
};
767
/* Register the MCTP packet handler, the rtnetlink route operations and
 * the per-namespace state. Called from protocol module init.
 */
int __init mctp_routes_init(void)
{
	dev_add_pack(&mctp_packet_type);

	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
			     NULL, mctp_dump_rtinfo, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
			     mctp_newroute, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
			     mctp_delroute, NULL, 0);

	return register_pernet_subsys(&mctp_net_ops);
}
781
/* Tear down in reverse order of mctp_routes_init() */
void __exit mctp_routes_exit(void)
{
	unregister_pernet_subsys(&mctp_net_ops);
	rtnl_unregister(PF_MCTP, RTM_DELROUTE);
	rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
	rtnl_unregister(PF_MCTP, RTM_GETROUTE);
	dev_remove_pack(&mctp_packet_type);
}