// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"

static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };

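/* Returns the bridge port with the given ifindex, or NULL if the bridge has
 * no such port.
 */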
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

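/* Returns the MRP instance with the given ring_id, or NULL if the bridge has
 * no such instance.
 */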
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

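/* Returns true if the ifindex is not already used as a ring port by any of
 * the bridge's MRP instances.
 */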
static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

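/* Returns the MRP instance that uses the port as its primary or secondary
 * ring port, or NULL if the port is not part of any instance.
 */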
static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}

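/* Returns the next sequence ID to be used in the MRP_Common header of the
 * generated frames.
 */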
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}

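/* Allocates an skb for an MRP frame and fills in the Ethernet header and the
 * 16-bit MRP version field. The rest of the frame is appended by the caller.
 */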
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

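/* Appends a TLV header of the given type and length to the frame. */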
static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

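/* Appends the MRP_Common TLV to the frame: the sequence ID and the default
 * domain UUID (all 0xff).
 */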
static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

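/* Builds a complete MRP_Test frame for the given ring port: Ethernet header,
 * MRP version, ring test TLV, common TLV and end TLV.
 */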
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(MRP_DEFAULT_PRIO);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

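/* Delayed work that periodically sends MRP_Test frames on both ring ports
 * while the test period has not expired. If more than test_max_miss intervals
 * passed without receiving an MRP_Test frame, and the ring role is not
 * offloaded to the HW, it notifies userspace that the ring is open.
 */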
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_test_skb(mrp, p, BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_test_skb(mrp, p, BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}

/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	list_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with
	 * the same ring_id.
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port as part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	list_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}

/* Deletes the MRP instance that the port is part of.
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes existing MRP instance based on ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set port state, port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		p->state = BR_STATE_FORWARDING;
	else
		p->state = BR_STATE_BLOCKING;

	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, state);

	return 0;
}

/* Set port role, port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 struct br_mrp_port_role *role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_id(p->br, role->ring_id);

	if (!mrp)
		return -EINVAL;

	if (role->role == BR_MRP_PORT_ROLE_PRIMARY)
		rcu_assign_pointer(mrp->p_port, p);
	else
		rcu_assign_pointer(mrp->s_port, p);

	br_mrp_port_switchdev_set_role(p, role->role);

	return 0;
}

/* Set ring state, ring state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
	    state->ring_state != BR_MRP_RING_STATE_CLOSED)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

/* Set ring role, ring role can be only MRM (Media Redundancy Manager) or
 * MRC (Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	int err;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MRM then the HW
	 * will notify the SW when the ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start generating MRP test frames. The frames are generated by the HW and
 * if that fails they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue generating
	 * the frames in SW.
	 */
	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
					     test->max_miss, test->period))
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify userspace that the ring is closed only when the ring is not
	 * already closed, otherwise it would notify at each received frame.
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_port_open(port->dev, false);
}

/* This will just forward the frame to the other MRP ring port (MRC role) or
 * will not do anything.
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *s_dev, *p_dev, *d_dev;
	struct net_bridge_port *p_port, *s_port;
	struct net_bridge *br;
	struct sk_buff *nskb;
	struct br_mrp *mrp;

	/* If the port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;

	/* If the role is MRM then don't forward the frames */
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
		br_mrp_mrm_process(mrp, p, skb);
		return 1;
	}

	/* Clone the frame and forward it on the other MRP port */
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return 0;

	p_dev = p_port->dev;
	s_dev = s_port->dev;

	if (p_dev == dev)
		d_dev = s_dev;
	else
		d_dev = p_dev;

	nskb->dev = d_dev;
	skb_push(nskb, ETH_HLEN);
	dev_queue_xmit(nskb);

	return 1;
}

/* Check if the frame was received on a port that is part of an MRP ring and
 * if the frame has the MRP ethertype. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
		return br_mrp_rcv(p, skb, p->dev);

out:
	return 0;
}

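/* Returns true when the bridge has at least one MRP instance configured. */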
bool br_mrp_enabled(struct net_bridge *br)
{
	return !list_empty(&br->mrp_list);
}