/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Network Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

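/*
 * Transmit flow-control tunables. The queue is stopped once fewer than
 * tx_stop descriptors remain free and only restarted after tx_start are
 * free again, giving the stop/start logic some hysteresis; tx_time is the
 * interval at which the reaper timer re-checks a full ring.
 */
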
/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5;

struct ntb_netdev {
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
};

#define	NTB_TX_TIMEOUT_MS	1000
#define	NTB_RXQ_SIZE		100

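/*
 * The transport reports hardware link changes here; carrier is asserted
 * only when the transport queue itself also reports its link as up.
 */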
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}

static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	int rc;

	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}

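/*
 * Transmit flow control: __ntb_netdev_maybe_stop_tx() stops the queue and
 * then re-checks the free-entry count. The smp_mb() pairs with the barrier
 * in the tx completion path, so either this side sees the freed entries and
 * restarts the queue itself, or the completion side sees the stopped queue
 * and wakes it; if the ring is still full, the reaper timer retries later.
 */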
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}

static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}

static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	int rc;

	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}

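/*
 * Deferred reaper: when the tx ring was found full, this timer fires every
 * tx_time usecs, re-arming itself until enough descriptors have completed,
 * then wakes the stopped queue.
 */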
static void ntb_netdev_tx_timer(struct timer_list *t)
{
	struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
	struct net_device *ndev = dev->ndev;

	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			goto err;
		}
	}

	timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0);

	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}

static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	del_timer_sync(&dev->tx_timer);

	return 0;
}

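/*
 * Changing the MTU means taking the link down and reposting rx buffers:
 * entries already enqueued were sized for the old MTU, so growing the MTU
 * requires reallocating every posted buffer at the new size.
 */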
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};

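/*
 * Probe runs when the ntb_transport bus matches this client: it allocates
 * an Ethernet device backed by a single transport queue, sizes the MTU to
 * the queue's maximum payload, and registers with the network stack.
 */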
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	eth_random_addr(ndev->perm_addr);
	memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	dev_set_drvdata(client_dev, ndev);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}

static void ntb_netdev_remove(struct device *client_dev)
{
	struct net_device *ndev = dev_get_drvdata(client_dev);
	struct ntb_netdev *dev = netdev_priv(ndev);

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};

static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;
	return ntb_transport_register_client(&ntb_netdev_client);
}
module_init(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);