// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/reboot.h>

#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.3"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS		8
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	bool open;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;
	int nact;	/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)

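/*
 * rionet MAC addresses have the form 00:01:00:01:<destid_hi>:<destid_lo>
 * (see rionet_setup_netdev()), so the last two bytes carry the RapidIO
 * destination ID of the peer.
 */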
#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))

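/*
 * Drain received buffers from the inbound mailbox: attach each message
 * buffer to its preallocated skb and hand it to the stack. Returns the
 * slot at which cleaning stopped, which the caller uses as the refill
 * boundary.
 */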
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}

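/*
 * Allocate fresh skbs and post their buffers to the inbound mailbox,
 * starting at rx_slot and stopping at 'end' (or on allocation failure).
 */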
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}

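/*
 * Queue one outbound message for 'rdev', remember the skb for completion
 * handling, and stop the queue when the TX ring becomes full.
 */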
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}

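/*
 * Transmit path: multicast/broadcast frames are replicated to every
 * active peer, while unicast frames are routed by the destination ID
 * embedded in the rionet MAC address.
 */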
static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
		     i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					refcount_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid])
			rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[destid]);
		else {
			/*
			 * If the target device was removed from the list of
			 * active peers but we still have TX packets targeting
			 * it, just report sending a packet to the target
			 * (without actual packet transfer).
			 */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}

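/*
 * Inbound doorbell handler implementing peer discovery: a JOIN doorbell
 * marks the sender as an active peer (and is answered with a JOIN), a
 * LEAVE doorbell removes it from the active table.
 */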
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

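/*
 * Outbound message completion: free the skbs the mailbox has acknowledged
 * and wake the queue once ring space is available again.
 */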
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}

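/*
 * Bring the interface up: claim the doorbell range and the inbound and
 * outbound mailboxes, prime the receive ring, then send a JOIN doorbell
 * to every known peer.
 */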
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer;
	struct rionet_private *rnet = netdev_priv(ndev);
	unsigned char netid = rnet->mport->id;
	unsigned long flags;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		/* Send a join message */
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);
	rnet->open = true;

      out:
	return rc;
}

static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;
	unsigned long flags;
	int i;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	rnet->open = false;

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (nets[netid].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[netid].active[peer->rdev->destid] = NULL;
		}
		if (peer->res)
			rio_release_outb_dbell(peer->rdev, peer->res);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}

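/*
 * A RapidIO device is going away: drop it from the peer list, notify it
 * with a LEAVE doorbell if it is still reachable, and release its
 * doorbell resource.
 */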
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING) {
					rio_send_doorbell(rdev,
							RIONET_DOORBELL_LEAVE);
				}
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
	strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open		= rionet_open,
	.ndo_stop		= rionet_close,
	.ndo_start_xmit		= rionet_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

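/*
 * One-time setup of the net_device for a local mport: allocate the
 * active-peer table, derive the MAC address from the local destination
 * ID, and register the device.
 */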
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u8 addr[ETH_ALEN];
	u16 device_id;
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	addr[0] = 0x00;
	addr[1] = 0x01;
	addr[2] = 0x00;
	addr[3] = 0x01;
	addr[4] = device_id >> 8;
	addr[5] = device_id & 0xff;
	eth_hw_addr_set(ndev, addr);

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	/* MTU range: 68 - 4082 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = RIONET_MAX_MTU;
	ndev->features = NETIF_F_LLTX;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       ndev->dev_addr,
	       mport->name);

      out:
	return rc;
}

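/*
 * Called for every RapidIO device found on the bus: sets up the netdev
 * the first time a device appears on a given net, then adds any
 * mailbox/doorbell-capable remote device to that net's peer list.
 */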
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		peer->res = rio_request_outb_dbell(peer->rdev,
						RIONET_DOORBELL_JOIN,
						RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}

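/*
 * Reboot notifier: send a LEAVE doorbell to every active peer on every
 * net so that remote nodes stop targeting this endpoint across the
 * restart.
 */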
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}

static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
			   RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}

#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
#endif

static struct subsys_interface rionet_interface = {
	.name		= "rionet",
	.subsys		= &rio_bus_type,
	.add_dev	= rionet_add_dev,
	.remove_dev	= rionet_remove_dev,
};

static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};

static int __init rionet_init(void)
{
	int ret;

	ret = register_reboot_notifier(&rionet_notifier);
	if (ret) {
		pr_err("%s: failed to register reboot notifier (err=%d)\n",
		       DRV_NAME, ret);
		return ret;
	}

	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		pr_err("%s: class_interface_register error: %d\n",
		       DRV_NAME, ret);
		return ret;
	}

	return subsys_interface_register(&rionet_interface);
}

static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}

late_initcall(rionet_init);
module_exit(rionet_exit);