/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.1"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define MII_READ			1
#define MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			devid;
	struct mii_bus		*mdiobus;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

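/* Register access helpers: transfer one 32-bit device register over the
 * USB control pipe using vendor-specific read/write requests. Both helpers
 * return a negative errno on failure.
 */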
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}

/* Loop until the read is completed, with timeout; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

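/* Raw EEPROM access: bytes are moved one at a time through the
 * E2P_CMD/E2P_DATA register pair, polling for completion between steps.
 */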
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0))
			return -EIO;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0))
			return -EIO;

		data[i] = val & 0xFF;
		offset++;
	}

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0))
		return -EIO;

	ret = lan78xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}

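/* Raw OTP read: the OTP block is powered up first (OTP_PWR_DN cleared),
 * then each byte is fetched with an address/command/status polling sequence.
 */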
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		/* only read the data when the signature was recognised */
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

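/* Write a block of words into the internal data port RAM (used for the
 * VLAN and multicast hash tables). Serialised by dataport_mutex.
 */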
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

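/* ndo_set_rx_mode handler: rebuild the perfect filter and multicast hash
 * tables under rfe_ctl_lock, then defer the actual register writes to a
 * work item, since USB transfers cannot be issued in atomic context.
 */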
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
					(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

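/* Resolve pause settings from the local and partner advertisements and
 * program MAC flow control. The fct_flow threshold values below are
 * assumed to be the vendor-recommended settings for SuperSpeed and
 * high-speed operation.
 */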
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

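/* Handle a PHY interrupt: re-read the link state and, on link-up,
 * reconfigure USB U1/U2 link power management and flow control to match
 * the negotiated speed and duplex.
 */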
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);

	return -EINVAL;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_statstage lan78xx_stat;
	u32 *p;
	int i;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
		p = (u32 *)&lan78xx_stat;
		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
			data[i] = p[i];
	}

	usb_autopm_put_interface(dev->intf);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}

static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int buf;

	if (mdix_ctrl == ETH_TP_MDI) {
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_1);
		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
			  buf | LAN88XX_EXT_MODE_CTRL_MDI_);
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_0);
	} else if (mdix_ctrl == ETH_TP_MDI_X) {
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_1);
		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
			  buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_0);
	} else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_1);
		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
			  buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_0);
	}
	dev->mdix_ctrl = mdix_ctrl;
}

static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_gset(phydev, cmd);

	buf = lan78xx_get_mdix_status(net);

	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len	= lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count	= lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;
	int i;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	/* handle our own interrupt */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;

	switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
	case 0x78000000:
	case 0x78500000:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}

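/* Find the PHY on the internal MDIO bus, attach it via phylib and
 * restrict the advertised modes to what the MAC supports.
 */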
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	struct phy_device *phydev = dev->net->phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_err(dev->net, "no PHY found\n");
		return -EIO;
	}

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 PHY_INTERFACE_MODE_GMII);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		return -EIO;
	}

	/* set to AUTOMDIX */
	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;
	phydev->supported |= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_1000baseT_Full |
			      SUPPORTED_Pause | SUPPORTED_Asym_Pause);
	genphy_config_aneg(phydev);

	/* Workaround to enable PHY interrupt.
	 * phy_start_interrupts() is API for requesting and enabling
	 * PHY interrupt. However, USB-to-Ethernet device can't use
	 * request_irq() called in phy_start_interrupts().
	 * Set PHY to PHY_HALTED and call phy_start()
	 * to make a call to phy_enable_interrupts()
	 */
	phy_stop(phydev);
	phy_start(phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;
}

static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}

static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}

static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}

int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}

/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}

static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}

static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}

static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}

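/* Program the USB LTM (Latency Tolerance Messaging) registers, taking the
 * values from EEPROM or OTP when provided there; otherwise the registers
 * are cleared to zero.
 */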
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}

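/* Bring the device to a known state: lite reset, MAC address setup,
 * burst/FIFO configuration, PHY reset, then enable the TX and RX paths.
 */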
1800static int lan78xx_reset(struct lan78xx_net *dev)
1801{
1802 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1803 u32 buf;
1804 int ret = 0;
1805 unsigned long timeout;
1806
1807 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1808 buf |= HW_CFG_LRST_;
1809 ret = lan78xx_write_reg(dev, HW_CFG, buf);
1810
1811 timeout = jiffies + HZ;
1812 do {
1813 mdelay(1);
1814 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1815 if (time_after(jiffies, timeout)) {
1816 netdev_warn(dev->net,
1817 "timeout on completion of LiteReset");
1818 return -EIO;
1819 }
1820 } while (buf & HW_CFG_LRST_);
1821
1822 lan78xx_init_mac_address(dev);
1823
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001824 /* save DEVID for later usage */
1825 ret = lan78xx_read_reg(dev, ID_REV, &buf);
1826 dev->devid = buf;
1827
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001828 /* Respond to the IN token with a NAK */
1829 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1830 buf |= USB_CFG_BIR_;
1831 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1832
1833 /* Init LTM */
1834 lan78xx_init_ltm(dev);
1835
1836 dev->net->hard_header_len += TX_OVERHEAD;
1837 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1838
1839 if (dev->udev->speed == USB_SPEED_SUPER) {
1840 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
1841 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1842 dev->rx_qlen = 4;
1843 dev->tx_qlen = 4;
1844 } else if (dev->udev->speed == USB_SPEED_HIGH) {
1845 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
1846 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1847 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
1848 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
1849 } else {
1850 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1851 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1852 dev->rx_qlen = 4;
1853 }
1854
1855 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
1856 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
1857
1858 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1859 buf |= HW_CFG_MEF_;
1860 ret = lan78xx_write_reg(dev, HW_CFG, buf);
1861
1862 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1863 buf |= USB_CFG_BCE_;
1864 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1865
1866 /* set FIFO sizes */
1867 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
1868 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
1869
1870 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
1871 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
1872
1873 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
1874 ret = lan78xx_write_reg(dev, FLOW, 0);
1875 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
1876
1877 /* Don't need rfe_ctl_lock during initialisation */
1878 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1879 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
1880 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1881
1882 /* Enable or disable checksum offload engines */
1883 lan78xx_set_features(dev->net, dev->net->features);
1884
1885 lan78xx_set_multicast(dev->net);
1886
1887 /* reset PHY */
1888 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1889 buf |= PMT_CTL_PHY_RST_;
1890 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
1891
1892 timeout = jiffies + HZ;
1893 do {
1894 mdelay(1);
1895 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1896 if (time_after(jiffies, timeout)) {
1897 netdev_warn(dev->net, "timeout waiting for PHY Reset");
1898 return -EIO;
1899 }
1900	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
1901
1902	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1903	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
1904	ret = lan78xx_write_reg(dev, MAC_CR, buf);
1905
1906	/* enable PHY interrupts */
1907 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1908 buf |= INT_ENP_PHY_INT;
1909 ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
1910
1911 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
1912 buf |= MAC_TX_TXEN_;
1913 ret = lan78xx_write_reg(dev, MAC_TX, buf);
1914
1915 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
1916 buf |= FCT_TX_CTL_EN_;
1917 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
1918
1919 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
1920
1921 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1922 buf |= MAC_RX_RXEN_;
1923 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1924
1925 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
1926 buf |= FCT_RX_CTL_EN_;
1927 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
1928
1929	return 0;
1930}
1931
1932static int lan78xx_open(struct net_device *net)
1933{
1934 struct lan78xx_net *dev = netdev_priv(net);
1935 int ret;
1936
1937 ret = usb_autopm_get_interface(dev->intf);
1938 if (ret < 0)
1939 goto out;
1940
1941 ret = lan78xx_reset(dev);
1942 if (ret < 0)
1943 goto done;
1944
1945	ret = lan78xx_phy_init(dev);
1946 if (ret < 0)
1947 goto done;
1948
1949	/* for Link Check */
1950 if (dev->urb_intr) {
1951 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
1952 if (ret < 0) {
1953 netif_err(dev, ifup, dev->net,
1954 "intr submit %d\n", ret);
1955 goto done;
1956 }
1957 }
1958
1959 set_bit(EVENT_DEV_OPEN, &dev->flags);
1960
1961 netif_start_queue(net);
1962
1963 dev->link_on = false;
1964
1965 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1966done:
1967 usb_autopm_put_interface(dev->intf);
1968
1969out:
1970 return ret;
1971}
1972
1973static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
1974{
1975 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1976 DECLARE_WAITQUEUE(wait, current);
1977 int temp;
1978
1979 /* ensure there are no more active urbs */
1980 add_wait_queue(&unlink_wakeup, &wait);
1981 set_current_state(TASK_UNINTERRUPTIBLE);
1982 dev->wait = &unlink_wakeup;
1983 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
1984
1985 /* maybe wait for deletions to finish. */
1986 while (!skb_queue_empty(&dev->rxq) &&
1987 !skb_queue_empty(&dev->txq) &&
1988 !skb_queue_empty(&dev->done)) {
1989 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
1990 set_current_state(TASK_UNINTERRUPTIBLE);
1991 netif_dbg(dev, ifdown, dev->net,
1992 "waited for %d urb completions\n", temp);
1993 }
1994 set_current_state(TASK_RUNNING);
1995 dev->wait = NULL;
1996 remove_wait_queue(&unlink_wakeup, &wait);
1997}
1998
1999int lan78xx_stop(struct net_device *net)
2000{
2001 struct lan78xx_net *dev = netdev_priv(net);
2002
2003	phy_stop(net->phydev);
2004 phy_disconnect(net->phydev);
2005 net->phydev = NULL;
2006
2007	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2008 netif_stop_queue(net);
2009
2010 netif_info(dev, ifdown, dev->net,
2011 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2012 net->stats.rx_packets, net->stats.tx_packets,
2013 net->stats.rx_errors, net->stats.tx_errors);
2014
2015 lan78xx_terminate_urbs(dev);
2016
2017 usb_kill_urb(dev->urb_intr);
2018
2019 skb_queue_purge(&dev->rxq_pause);
2020
2021 /* deferred work (task, timer, softirq) must also stop.
2022 * can't flush_scheduled_work() until we drop rtnl (later),
2023 * else workers could deadlock; so make workers a NOP.
2024 */
2025 dev->flags = 0;
2026 cancel_delayed_work_sync(&dev->wq);
2027 tasklet_kill(&dev->bh);
2028
2029 usb_autopm_put_interface(dev->intf);
2030
2031 return 0;
2032}
2033
2034static int lan78xx_linearize(struct sk_buff *skb)
2035{
2036 return skb_linearize(skb);
2037}
2038
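/* Prepend the two 32-bit TX command words (TX_OVERHEAD bytes) in front of
 * every frame: word A carries the frame length, FCS insertion, the
 * checksum-offload flags and LSO enable; word B carries the MSS for LSO and
 * the VLAN tag to insert.  Both words are stored little-endian.
 */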
2039static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2040 struct sk_buff *skb, gfp_t flags)
2041{
2042 u32 tx_cmd_a, tx_cmd_b;
2043
2044 if (skb_headroom(skb) < TX_OVERHEAD) {
2045 struct sk_buff *skb2;
2046
2047 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2048 dev_kfree_skb_any(skb);
2049 skb = skb2;
2050 if (!skb)
2051 return NULL;
2052 }
2053
2054 if (lan78xx_linearize(skb) < 0)
2055 return NULL;
2056
2057 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2058
2059 if (skb->ip_summed == CHECKSUM_PARTIAL)
2060 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2061
2062 tx_cmd_b = 0;
2063 if (skb_is_gso(skb)) {
2064 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2065
2066 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2067
2068 tx_cmd_a |= TX_CMD_A_LSO_;
2069 }
2070
2071 if (skb_vlan_tag_present(skb)) {
2072 tx_cmd_a |= TX_CMD_A_IVTG_;
2073 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2074 }
2075
2076 skb_push(skb, 4);
2077 cpu_to_le32s(&tx_cmd_b);
2078 memcpy(skb->data, &tx_cmd_b, 4);
2079
2080 skb_push(skb, 4);
2081 cpu_to_le32s(&tx_cmd_a);
2082 memcpy(skb->data, &tx_cmd_a, 4);
2083
2084 return skb;
2085}
2086
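/* Move a completed skb from its rx/tx queue onto the "done" list and kick
 * the bottom-half tasklet when that list goes non-empty; returns the state
 * the entry had before it was marked done.
 */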
2087static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2088 struct sk_buff_head *list, enum skb_state state)
2089{
2090 unsigned long flags;
2091 enum skb_state old_state;
2092 struct skb_data *entry = (struct skb_data *)skb->cb;
2093
2094 spin_lock_irqsave(&list->lock, flags);
2095 old_state = entry->state;
2096 entry->state = state;
2097
2098 __skb_unlink(skb, list);
2099 spin_unlock(&list->lock);
2100 spin_lock(&dev->done.lock);
2101
2102 __skb_queue_tail(&dev->done, skb);
2103 if (skb_queue_len(&dev->done) == 1)
2104 tasklet_schedule(&dev->bh);
2105 spin_unlock_irqrestore(&dev->done.lock, flags);
2106
2107 return old_state;
2108}
2109
2110static void tx_complete(struct urb *urb)
2111{
2112 struct sk_buff *skb = (struct sk_buff *)urb->context;
2113 struct skb_data *entry = (struct skb_data *)skb->cb;
2114 struct lan78xx_net *dev = entry->dev;
2115
2116 if (urb->status == 0) {
2117 dev->net->stats.tx_packets++;
2118 dev->net->stats.tx_bytes += entry->length;
2119 } else {
2120 dev->net->stats.tx_errors++;
2121
2122 switch (urb->status) {
2123 case -EPIPE:
2124 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2125 break;
2126
2127 /* software-driven interface shutdown */
2128 case -ECONNRESET:
2129 case -ESHUTDOWN:
2130 break;
2131
2132 case -EPROTO:
2133 case -ETIME:
2134 case -EILSEQ:
2135 netif_stop_queue(dev->net);
2136 break;
2137 default:
2138 netif_dbg(dev, tx_err, dev->net,
2139 "tx err %d\n", entry->urb->status);
2140 break;
2141 }
2142 }
2143
2144 usb_autopm_put_interface_async(dev->intf);
2145
2146	defer_bh(dev, skb, &dev->txq, tx_done);
2147}
2148
2149static void lan78xx_queue_skb(struct sk_buff_head *list,
2150 struct sk_buff *newsk, enum skb_state state)
2151{
2152 struct skb_data *entry = (struct skb_data *)newsk->cb;
2153
2154 __skb_queue_tail(list, newsk);
2155 entry->state = state;
2156}
2157
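/* Frames are not handed to USB directly: they get their command words added
 * by lan78xx_tx_prep() and are parked on txq_pend, from where the tasklet
 * batches them into bulk URBs.  The netif queue is stopped once more than
 * ten frames are pending.
 */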
2158netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2159{
2160 struct lan78xx_net *dev = netdev_priv(net);
2161	struct sk_buff *skb2 = NULL;
2162
2163	if (skb) {
2164		skb_tx_timestamp(skb);
2165 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2166 }
2167
2168 if (skb2) {
2169 skb_queue_tail(&dev->txq_pend, skb2);
2170
2171 if (skb_queue_len(&dev->txq_pend) > 10)
2172 netif_stop_queue(net);
2173 } else {
2174 netif_dbg(dev, tx_err, dev->net,
2175 "lan78xx_tx_prep return NULL\n");
2176 dev->net->stats.tx_errors++;
2177 dev->net->stats.tx_dropped++;
2178 }
2179
2180 tasklet_schedule(&dev->bh);
2181
2182 return NETDEV_TX_OK;
2183}
2184
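/* Scan the interface altsettings for the bulk-IN, bulk-OUT and (optional)
 * interrupt-IN endpoints and record the resulting pipes.
 */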
2185int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2186{
2187 int tmp;
2188 struct usb_host_interface *alt = NULL;
2189 struct usb_host_endpoint *in = NULL, *out = NULL;
2190 struct usb_host_endpoint *status = NULL;
2191
2192 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2193 unsigned ep;
2194
2195 in = NULL;
2196 out = NULL;
2197 status = NULL;
2198 alt = intf->altsetting + tmp;
2199
2200 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2201 struct usb_host_endpoint *e;
2202 int intr = 0;
2203
2204 e = alt->endpoint + ep;
2205 switch (e->desc.bmAttributes) {
2206 case USB_ENDPOINT_XFER_INT:
2207 if (!usb_endpoint_dir_in(&e->desc))
2208 continue;
2209 intr = 1;
2210 /* FALLTHROUGH */
2211 case USB_ENDPOINT_XFER_BULK:
2212 break;
2213 default:
2214 continue;
2215 }
2216 if (usb_endpoint_dir_in(&e->desc)) {
2217 if (!intr && !in)
2218 in = e;
2219 else if (intr && !status)
2220 status = e;
2221 } else {
2222 if (!out)
2223 out = e;
2224 }
2225 }
2226 if (in && out)
2227 break;
2228 }
2229 if (!alt || !in || !out)
2230 return -EINVAL;
2231
2232 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2233 in->desc.bEndpointAddress &
2234 USB_ENDPOINT_NUMBER_MASK);
2235 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2236 out->desc.bEndpointAddress &
2237 USB_ENDPOINT_NUMBER_MASK);
2238 dev->ep_intr = status;
2239
2240 return 0;
2241}
2242
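/* One-time setup at probe: allocate the driver private area, initialise the
 * locks and the deferred multicast/VLAN work, pick the default offload
 * features, run the initial chip reset and register the MDIO bus.
 */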
2243static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2244{
2245 struct lan78xx_priv *pdata = NULL;
2246 int ret;
2247 int i;
2248
2249 ret = lan78xx_get_endpoints(dev, intf);
2250
2251 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2252
2253 pdata = (struct lan78xx_priv *)(dev->data[0]);
2254 if (!pdata) {
2255 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2256 return -ENOMEM;
2257 }
2258
2259 pdata->dev = dev;
2260
2261 spin_lock_init(&pdata->rfe_ctl_lock);
2262 mutex_init(&pdata->dataport_mutex);
2263
2264 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2265
2266 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2267 pdata->vlan_table[i] = 0;
2268
2269 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2270
2271 dev->net->features = 0;
2272
2273 if (DEFAULT_TX_CSUM_ENABLE)
2274 dev->net->features |= NETIF_F_HW_CSUM;
2275
2276 if (DEFAULT_RX_CSUM_ENABLE)
2277 dev->net->features |= NETIF_F_RXCSUM;
2278
2279 if (DEFAULT_TSO_CSUM_ENABLE)
2280 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2281
2282 dev->net->hw_features = dev->net->features;
2283
2284 /* Init all registers */
2285 ret = lan78xx_reset(dev);
2286
2287	lan78xx_mdio_init(dev);
2288
2289	dev->net->flags |= IFF_MULTICAST;
2290
2291 pdata->wol = WAKE_MAGIC;
2292
2293 return 0;
2294}
2295
2296static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2297{
2298 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2299
2300	lan78xx_remove_mdio(dev);
2301
2302	if (pdata) {
2303 netif_dbg(dev, ifdown, dev->net, "free pdata");
2304 kfree(pdata);
2305 pdata = NULL;
2306 dev->data[0] = 0;
2307 }
2308}
2309
2310static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2311 struct sk_buff *skb,
2312 u32 rx_cmd_a, u32 rx_cmd_b)
2313{
2314 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2315 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2316 skb->ip_summed = CHECKSUM_NONE;
2317 } else {
2318 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2319 skb->ip_summed = CHECKSUM_COMPLETE;
2320 }
2321}
2322
2323void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2324{
2325 int status;
2326
2327 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2328 skb_queue_tail(&dev->rxq_pause, skb);
2329 return;
2330 }
2331
2332 skb->protocol = eth_type_trans(skb, dev->net);
2333 dev->net->stats.rx_packets++;
2334 dev->net->stats.rx_bytes += skb->len;
2335
2336 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2337 skb->len + sizeof(struct ethhdr), skb->protocol);
2338 memset(skb->cb, 0, sizeof(struct skb_data));
2339
2340 if (skb_defer_rx_timestamp(skb))
2341 return;
2342
2343 status = netif_rx(skb);
2344 if (status != NET_RX_SUCCESS)
2345 netif_dbg(dev, rx_err, dev->net,
2346 "netif_rx status %d\n", status);
2347}
2348
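/* Unpack one bulk-IN buffer, which may hold several frames.  Each frame is
 * preceded by the little-endian rx_cmd_a/b/c words; the length comes from
 * rx_cmd_a and frames are padded so the next one starts on a 4-byte
 * boundary.  The last frame reuses this skb, earlier ones are cloned, and
 * the trailing 4-byte FCS is trimmed before the skb is passed up the stack.
 */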
2349static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2350{
2351 if (skb->len < dev->net->hard_header_len)
2352 return 0;
2353
2354 while (skb->len > 0) {
2355 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2356 u16 rx_cmd_c;
2357 struct sk_buff *skb2;
2358 unsigned char *packet;
2359
2360 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2361 le32_to_cpus(&rx_cmd_a);
2362 skb_pull(skb, sizeof(rx_cmd_a));
2363
2364 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2365 le32_to_cpus(&rx_cmd_b);
2366 skb_pull(skb, sizeof(rx_cmd_b));
2367
2368 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2369 le16_to_cpus(&rx_cmd_c);
2370 skb_pull(skb, sizeof(rx_cmd_c));
2371
2372 packet = skb->data;
2373
2374 /* get the packet length */
2375 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2376 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2377
2378 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2379 netif_dbg(dev, rx_err, dev->net,
2380 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2381 } else {
2382 /* last frame in this batch */
2383 if (skb->len == size) {
2384 lan78xx_rx_csum_offload(dev, skb,
2385 rx_cmd_a, rx_cmd_b);
2386
2387 skb_trim(skb, skb->len - 4); /* remove fcs */
2388 skb->truesize = size + sizeof(struct sk_buff);
2389
2390 return 1;
2391 }
2392
2393 skb2 = skb_clone(skb, GFP_ATOMIC);
2394 if (unlikely(!skb2)) {
2395 netdev_warn(dev->net, "Error allocating skb");
2396 return 0;
2397 }
2398
2399 skb2->len = size;
2400 skb2->data = packet;
2401 skb_set_tail_pointer(skb2, size);
2402
2403 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2404
2405 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2406 skb2->truesize = size + sizeof(struct sk_buff);
2407
2408 lan78xx_skb_return(dev, skb2);
2409 }
2410
2411 skb_pull(skb, size);
2412
2413 /* padding bytes before the next frame starts */
2414 if (skb->len)
2415 skb_pull(skb, align_count);
2416 }
2417
2418	return 1;
2419}
2420
2421static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2422{
2423 if (!lan78xx_rx(dev, skb)) {
2424 dev->net->stats.rx_errors++;
2425 goto done;
2426 }
2427
2428 if (skb->len) {
2429 lan78xx_skb_return(dev, skb);
2430 return;
2431 }
2432
2433 netif_dbg(dev, rx_err, dev->net, "drop\n");
2434 dev->net->stats.rx_errors++;
2435done:
2436 skb_queue_tail(&dev->done, skb);
2437}
2438
2439static void rx_complete(struct urb *urb);
2440
2441static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2442{
2443 struct sk_buff *skb;
2444 struct skb_data *entry;
2445 unsigned long lockflags;
2446 size_t size = dev->rx_urb_size;
2447 int ret = 0;
2448
2449 skb = netdev_alloc_skb_ip_align(dev->net, size);
2450 if (!skb) {
2451 usb_free_urb(urb);
2452 return -ENOMEM;
2453 }
2454
2455 entry = (struct skb_data *)skb->cb;
2456 entry->urb = urb;
2457 entry->dev = dev;
2458 entry->length = 0;
2459
2460 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2461 skb->data, size, rx_complete, skb);
2462
2463 spin_lock_irqsave(&dev->rxq.lock, lockflags);
2464
2465 if (netif_device_present(dev->net) &&
2466 netif_running(dev->net) &&
2467 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2468 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2469 ret = usb_submit_urb(urb, GFP_ATOMIC);
2470 switch (ret) {
2471 case 0:
2472 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2473 break;
2474 case -EPIPE:
2475 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2476 break;
2477 case -ENODEV:
2478 netif_dbg(dev, ifdown, dev->net, "device gone\n");
2479 netif_device_detach(dev->net);
2480 break;
2481 case -EHOSTUNREACH:
2482 ret = -ENOLINK;
2483 break;
2484 default:
2485 netif_dbg(dev, rx_err, dev->net,
2486 "rx submit, %d\n", ret);
2487 tasklet_schedule(&dev->bh);
2488 }
2489 } else {
2490 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2491 ret = -ENOLINK;
2492 }
2493 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2494 if (ret) {
2495 dev_kfree_skb_any(skb);
2496 usb_free_urb(urb);
2497 }
2498 return ret;
2499}
2500
2501static void rx_complete(struct urb *urb)
2502{
2503 struct sk_buff *skb = (struct sk_buff *)urb->context;
2504 struct skb_data *entry = (struct skb_data *)skb->cb;
2505 struct lan78xx_net *dev = entry->dev;
2506 int urb_status = urb->status;
2507 enum skb_state state;
2508
2509 skb_put(skb, urb->actual_length);
2510 state = rx_done;
2511 entry->urb = NULL;
2512
2513 switch (urb_status) {
2514 case 0:
2515 if (skb->len < dev->net->hard_header_len) {
2516 state = rx_cleanup;
2517 dev->net->stats.rx_errors++;
2518 dev->net->stats.rx_length_errors++;
2519 netif_dbg(dev, rx_err, dev->net,
2520 "rx length %d\n", skb->len);
2521 }
2522 usb_mark_last_busy(dev->udev);
2523 break;
2524 case -EPIPE:
2525 dev->net->stats.rx_errors++;
2526 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2527 /* FALLTHROUGH */
2528 case -ECONNRESET: /* async unlink */
2529 case -ESHUTDOWN: /* hardware gone */
2530 netif_dbg(dev, ifdown, dev->net,
2531 "rx shutdown, code %d\n", urb_status);
2532 state = rx_cleanup;
2533 entry->urb = urb;
2534 urb = NULL;
2535 break;
2536 case -EPROTO:
2537 case -ETIME:
2538 case -EILSEQ:
2539 dev->net->stats.rx_errors++;
2540 state = rx_cleanup;
2541 entry->urb = urb;
2542 urb = NULL;
2543 break;
2544
2545 /* data overrun ... flush fifo? */
2546 case -EOVERFLOW:
2547 dev->net->stats.rx_over_errors++;
2548 /* FALLTHROUGH */
2549
2550 default:
2551 state = rx_cleanup;
2552 dev->net->stats.rx_errors++;
2553 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2554 break;
2555 }
2556
2557 state = defer_bh(dev, skb, &dev->rxq, state);
2558
2559 if (urb) {
2560 if (netif_running(dev->net) &&
2561 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2562 state != unlink_start) {
2563 rx_submit(dev, urb, GFP_ATOMIC);
2564 return;
2565 }
2566 usb_free_urb(urb);
2567 }
2568 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2569}
2570
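/* Transmit bottom half: pending frames (already carrying their command
 * words) are copied back-to-back into a single buffer, each padded to a
 * 4-byte boundary and capped at MAX_SINGLE_PACKET_SIZE, then sent with one
 * bulk-OUT URB.  GSO frames are always sent on their own, a ZLP is
 * requested when the total length is a multiple of maxpacket, and while the
 * device is autosuspended the URB is parked on dev->deferred for
 * resubmission at resume time.
 */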
2571static void lan78xx_tx_bh(struct lan78xx_net *dev)
2572{
2573 int length;
2574 struct urb *urb = NULL;
2575 struct skb_data *entry;
2576 unsigned long flags;
2577 struct sk_buff_head *tqp = &dev->txq_pend;
2578 struct sk_buff *skb, *skb2;
2579 int ret;
2580 int count, pos;
2581 int skb_totallen, pkt_cnt;
2582
2583 skb_totallen = 0;
2584 pkt_cnt = 0;
2585 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2586 if (skb_is_gso(skb)) {
2587 if (pkt_cnt) {
2588 /* handle previous packets first */
2589 break;
2590 }
2591 length = skb->len;
2592 skb2 = skb_dequeue(tqp);
2593 goto gso_skb;
2594 }
2595
2596 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2597 break;
2598 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2599 pkt_cnt++;
2600 }
2601
2602 /* copy to a single skb */
2603 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2604 if (!skb)
2605 goto drop;
2606
2607 skb_put(skb, skb_totallen);
2608
2609 for (count = pos = 0; count < pkt_cnt; count++) {
2610 skb2 = skb_dequeue(tqp);
2611 if (skb2) {
2612 memcpy(skb->data + pos, skb2->data, skb2->len);
2613 pos += roundup(skb2->len, sizeof(u32));
2614 dev_kfree_skb(skb2);
2615		}
2616 }
2617
2618 length = skb_totallen;
2619
2620gso_skb:
2621 urb = usb_alloc_urb(0, GFP_ATOMIC);
2622 if (!urb) {
2623 netif_dbg(dev, tx_err, dev->net, "no urb\n");
2624 goto drop;
2625 }
2626
2627 entry = (struct skb_data *)skb->cb;
2628 entry->urb = urb;
2629 entry->dev = dev;
2630 entry->length = length;
2631
2632 spin_lock_irqsave(&dev->txq.lock, flags);
2633 ret = usb_autopm_get_interface_async(dev->intf);
2634 if (ret < 0) {
2635 spin_unlock_irqrestore(&dev->txq.lock, flags);
2636 goto drop;
2637 }
2638
2639 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2640 skb->data, skb->len, tx_complete, skb);
2641
2642 if (length % dev->maxpacket == 0) {
2643 /* send USB_ZERO_PACKET */
2644 urb->transfer_flags |= URB_ZERO_PACKET;
2645 }
2646
2647#ifdef CONFIG_PM
2648	/* if this triggers, the device is still asleep */
2649 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2650 /* transmission will be done in resume */
2651 usb_anchor_urb(urb, &dev->deferred);
2652 /* no use to process more packets */
2653 netif_stop_queue(dev->net);
2654 usb_put_urb(urb);
2655 spin_unlock_irqrestore(&dev->txq.lock, flags);
2656 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2657 return;
2658 }
2659#endif
2660
2661 ret = usb_submit_urb(urb, GFP_ATOMIC);
2662 switch (ret) {
2663 case 0:
2664 dev->net->trans_start = jiffies;
2665 lan78xx_queue_skb(&dev->txq, skb, tx_start);
2666 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2667 netif_stop_queue(dev->net);
2668 break;
2669 case -EPIPE:
2670 netif_stop_queue(dev->net);
2671 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2672 usb_autopm_put_interface_async(dev->intf);
2673 break;
2674 default:
2675 usb_autopm_put_interface_async(dev->intf);
2676 netif_dbg(dev, tx_err, dev->net,
2677 "tx: submit urb err %d\n", ret);
2678 break;
2679 }
2680
2681 spin_unlock_irqrestore(&dev->txq.lock, flags);
2682
2683 if (ret) {
2684 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2685drop:
2686 dev->net->stats.tx_dropped++;
2687 if (skb)
2688 dev_kfree_skb_any(skb);
2689 usb_free_urb(urb);
2690 } else
2691 netif_dbg(dev, tx_queued, dev->net,
2692 "> tx, len %d, type 0x%x\n", length, skb->protocol);
2693}
2694
2695static void lan78xx_rx_bh(struct lan78xx_net *dev)
2696{
2697 struct urb *urb;
2698 int i;
2699
2700 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2701 for (i = 0; i < 10; i++) {
2702 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2703 break;
2704 urb = usb_alloc_urb(0, GFP_ATOMIC);
2705 if (urb)
2706 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2707 return;
2708 }
2709
2710 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2711 tasklet_schedule(&dev->bh);
2712 }
2713 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2714 netif_wake_queue(dev->net);
2715}
2716
2717static void lan78xx_bh(unsigned long param)
2718{
2719 struct lan78xx_net *dev = (struct lan78xx_net *)param;
2720 struct sk_buff *skb;
2721 struct skb_data *entry;
2722
2723	while ((skb = skb_dequeue(&dev->done))) {
2724 entry = (struct skb_data *)(skb->cb);
2725 switch (entry->state) {
2726 case rx_done:
2727 entry->state = rx_cleanup;
2728 rx_process(dev, skb);
2729 continue;
2730 case tx_done:
2731 usb_free_urb(entry->urb);
2732 dev_kfree_skb(skb);
2733 continue;
2734 case rx_cleanup:
2735 usb_free_urb(entry->urb);
2736 dev_kfree_skb(skb);
2737 continue;
2738 default:
2739 netdev_dbg(dev->net, "skb state %d\n", entry->state);
2740 return;
2741 }
2742	}
2743
2744 if (netif_device_present(dev->net) && netif_running(dev->net)) {
2745 if (!skb_queue_empty(&dev->txq_pend))
2746 lan78xx_tx_bh(dev);
2747
2748 if (!timer_pending(&dev->delay) &&
2749 !test_bit(EVENT_RX_HALT, &dev->flags))
2750 lan78xx_rx_bh(dev);
2751 }
2752}
2753
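/* Deferred keventd work: clear stalled bulk endpoints (EVENT_TX_HALT /
 * EVENT_RX_HALT) and run the link reset outside of atomic context.
 */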
2754static void lan78xx_delayedwork(struct work_struct *work)
2755{
2756 int status;
2757 struct lan78xx_net *dev;
2758
2759 dev = container_of(work, struct lan78xx_net, wq.work);
2760
2761 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2762 unlink_urbs(dev, &dev->txq);
2763 status = usb_autopm_get_interface(dev->intf);
2764 if (status < 0)
2765 goto fail_pipe;
2766 status = usb_clear_halt(dev->udev, dev->pipe_out);
2767 usb_autopm_put_interface(dev->intf);
2768 if (status < 0 &&
2769 status != -EPIPE &&
2770 status != -ESHUTDOWN) {
2771 if (netif_msg_tx_err(dev))
2772fail_pipe:
2773 netdev_err(dev->net,
2774 "can't clear tx halt, status %d\n",
2775 status);
2776 } else {
2777 clear_bit(EVENT_TX_HALT, &dev->flags);
2778 if (status != -ESHUTDOWN)
2779 netif_wake_queue(dev->net);
2780 }
2781 }
2782 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2783 unlink_urbs(dev, &dev->rxq);
2784 status = usb_autopm_get_interface(dev->intf);
2785 if (status < 0)
2786 goto fail_halt;
2787 status = usb_clear_halt(dev->udev, dev->pipe_in);
2788 usb_autopm_put_interface(dev->intf);
2789 if (status < 0 &&
2790 status != -EPIPE &&
2791 status != -ESHUTDOWN) {
2792 if (netif_msg_rx_err(dev))
2793fail_halt:
2794 netdev_err(dev->net,
2795 "can't clear rx halt, status %d\n",
2796 status);
2797 } else {
2798 clear_bit(EVENT_RX_HALT, &dev->flags);
2799 tasklet_schedule(&dev->bh);
2800 }
2801 }
2802
2803 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2804 int ret = 0;
2805
2806 clear_bit(EVENT_LINK_RESET, &dev->flags);
2807 status = usb_autopm_get_interface(dev->intf);
2808 if (status < 0)
2809 goto skip_reset;
2810 if (lan78xx_link_reset(dev) < 0) {
2811 usb_autopm_put_interface(dev->intf);
2812skip_reset:
2813 netdev_info(dev->net, "link reset failed (%d)\n",
2814 ret);
2815 } else {
2816 usb_autopm_put_interface(dev->intf);
2817 }
2818 }
2819}
2820
2821static void intr_complete(struct urb *urb)
2822{
2823 struct lan78xx_net *dev = urb->context;
2824 int status = urb->status;
2825
2826 switch (status) {
2827 /* success */
2828 case 0:
2829 lan78xx_status(dev, urb);
2830 break;
2831
2832 /* software-driven interface shutdown */
2833 case -ENOENT: /* urb killed */
2834 case -ESHUTDOWN: /* hardware gone */
2835 netif_dbg(dev, ifdown, dev->net,
2836 "intr shutdown, code %d\n", status);
2837 return;
2838
2839 /* NOTE: not throttling like RX/TX, since this endpoint
2840 * already polls infrequently
2841 */
2842 default:
2843 netdev_dbg(dev->net, "intr status %d\n", status);
2844 break;
2845 }
2846
2847 if (!netif_running(dev->net))
2848 return;
2849
2850 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2851 status = usb_submit_urb(urb, GFP_ATOMIC);
2852 if (status != 0)
2853 netif_err(dev, timer, dev->net,
2854 "intr resubmit --> %d\n", status);
2855}
2856
2857static void lan78xx_disconnect(struct usb_interface *intf)
2858{
2859 struct lan78xx_net *dev;
2860 struct usb_device *udev;
2861 struct net_device *net;
2862
2863 dev = usb_get_intfdata(intf);
2864 usb_set_intfdata(intf, NULL);
2865 if (!dev)
2866 return;
2867
2868 udev = interface_to_usbdev(intf);
2869
2870 net = dev->net;
2871 unregister_netdev(net);
2872
2873 cancel_delayed_work_sync(&dev->wq);
2874
2875 usb_scuttle_anchored_urbs(&dev->deferred);
2876
2877 lan78xx_unbind(dev, intf);
2878
2879 usb_kill_urb(dev->urb_intr);
2880 usb_free_urb(dev->urb_intr);
2881
2882 free_netdev(net);
2883 usb_put_dev(udev);
2884}
2885
2886void lan78xx_tx_timeout(struct net_device *net)
2887{
2888 struct lan78xx_net *dev = netdev_priv(net);
2889
2890 unlink_urbs(dev, &dev->txq);
2891 tasklet_schedule(&dev->bh);
2892}
2893
2894static const struct net_device_ops lan78xx_netdev_ops = {
2895 .ndo_open = lan78xx_open,
2896 .ndo_stop = lan78xx_stop,
2897 .ndo_start_xmit = lan78xx_start_xmit,
2898 .ndo_tx_timeout = lan78xx_tx_timeout,
2899 .ndo_change_mtu = lan78xx_change_mtu,
2900 .ndo_set_mac_address = lan78xx_set_mac_addr,
2901 .ndo_validate_addr = eth_validate_addr,
2902 .ndo_do_ioctl = lan78xx_ioctl,
2903 .ndo_set_rx_mode = lan78xx_set_multicast,
2904 .ndo_set_features = lan78xx_set_features,
2905 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
2906 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
2907};
2908
2909static int lan78xx_probe(struct usb_interface *intf,
2910 const struct usb_device_id *id)
2911{
2912 struct lan78xx_net *dev;
2913 struct net_device *netdev;
2914 struct usb_device *udev;
2915 int ret;
2916 unsigned maxp;
2917 unsigned period;
2918 u8 *buf = NULL;
2919
2920 udev = interface_to_usbdev(intf);
2921 udev = usb_get_dev(udev);
2922
2923 ret = -ENOMEM;
2924 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2925 if (!netdev) {
2926 dev_err(&intf->dev, "Error: OOM\n");
2927 goto out1;
2928 }
2929
2930 /* netdev_printk() needs this */
2931 SET_NETDEV_DEV(netdev, &intf->dev);
2932
2933 dev = netdev_priv(netdev);
2934 dev->udev = udev;
2935 dev->intf = intf;
2936 dev->net = netdev;
2937 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2938 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2939
2940 skb_queue_head_init(&dev->rxq);
2941 skb_queue_head_init(&dev->txq);
2942 skb_queue_head_init(&dev->done);
2943 skb_queue_head_init(&dev->rxq_pause);
2944 skb_queue_head_init(&dev->txq_pend);
2945 mutex_init(&dev->phy_mutex);
2946
2947 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2948 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2949 init_usb_anchor(&dev->deferred);
2950
2951 netdev->netdev_ops = &lan78xx_netdev_ops;
2952 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2953 netdev->ethtool_ops = &lan78xx_ethtool_ops;
2954
2955 ret = lan78xx_bind(dev, intf);
2956 if (ret < 0)
2957 goto out2;
2958 strcpy(netdev->name, "eth%d");
2959
2960 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2961 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2962
2963 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
2964 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
2965 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
2966
2967 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2968 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2969
2970 dev->pipe_intr = usb_rcvintpipe(dev->udev,
2971 dev->ep_intr->desc.bEndpointAddress &
2972 USB_ENDPOINT_NUMBER_MASK);
2973 period = dev->ep_intr->desc.bInterval;
2974
2975 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2976 buf = kmalloc(maxp, GFP_KERNEL);
2977 if (buf) {
2978 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2979 if (!dev->urb_intr) {
2980 kfree(buf);
2981 goto out3;
2982 } else {
2983 usb_fill_int_urb(dev->urb_intr, dev->udev,
2984 dev->pipe_intr, buf, maxp,
2985 intr_complete, dev, period);
2986 }
2987 }
2988
2989 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
2990
2991 /* driver requires remote-wakeup capability during autosuspend. */
2992 intf->needs_remote_wakeup = 1;
2993
2994 ret = register_netdev(netdev);
2995 if (ret != 0) {
2996 netif_err(dev, probe, netdev, "couldn't register the device\n");
2997 goto out2;
2998 }
2999
3000 usb_set_intfdata(intf, dev);
3001
3002 ret = device_set_wakeup_enable(&udev->dev, true);
3003
3004	/* The default autosuspend delay of 2 seconds costs more in overhead
3005	 * than it gains, so set it to 10 seconds instead.
3006 */
3007 pm_runtime_set_autosuspend_delay(&udev->dev,
3008 DEFAULT_AUTOSUSPEND_DELAY);
3009
3010 return 0;
3011
3012out3:
3013 lan78xx_unbind(dev, intf);
3014out2:
3015 free_netdev(netdev);
3016out1:
3017 usb_put_dev(udev);
3018
3019 return ret;
3020}
3021
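/* Bit-serial CRC-16 (polynomial 0x8005) over a wake-frame pattern; the
 * result is programmed into the CRC16 field of the WUF_CFGx registers.
 */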
3022static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3023{
3024 const u16 crc16poly = 0x8005;
3025 int i;
3026 u16 bit, crc, msb;
3027 u8 data;
3028
3029 crc = 0xFFFF;
3030 for (i = 0; i < len; i++) {
3031 data = *buf++;
3032 for (bit = 0; bit < 8; bit++) {
3033 msb = crc >> 15;
3034 crc <<= 1;
3035
3036 if (msb ^ (u16)(data & 1)) {
3037 crc ^= crc16poly;
3038 crc |= (u16)0x0001U;
3039 }
3040 data >>= 1;
3041 }
3042 }
3043
3044 return crc;
3045}
3046
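/* Arm wake-on-LAN before suspending: TX/RX are paused, stale wake status is
 * cleared, and WUCSR plus the WUF_CFG/WUF_MASK filters are programmed from
 * the requested WAKE_* flags (magic packet selects suspend mode 3, the
 * other wake sources mode 0).  RX is re-enabled at the end so wake frames
 * can still be received.
 */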
3047static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3048{
3049 u32 buf;
3050 int ret;
3051 int mask_index;
3052 u16 crc;
3053 u32 temp_wucsr;
3054 u32 temp_pmt_ctl;
3055 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3056 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3057 const u8 arp_type[2] = { 0x08, 0x06 };
3058
3059 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3060 buf &= ~MAC_TX_TXEN_;
3061 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3062 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3063 buf &= ~MAC_RX_RXEN_;
3064 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3065
3066 ret = lan78xx_write_reg(dev, WUCSR, 0);
3067 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3068 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3069
3070 temp_wucsr = 0;
3071
3072 temp_pmt_ctl = 0;
3073 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3074 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3075 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3076
3077 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3078 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3079
3080 mask_index = 0;
3081 if (wol & WAKE_PHY) {
3082 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3083
3084 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3085 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3086 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3087 }
3088 if (wol & WAKE_MAGIC) {
3089 temp_wucsr |= WUCSR_MPEN_;
3090
3091 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3092 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3093 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3094 }
3095 if (wol & WAKE_BCAST) {
3096 temp_wucsr |= WUCSR_BCST_EN_;
3097
3098 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3099 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3100 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3101 }
3102 if (wol & WAKE_MCAST) {
3103 temp_wucsr |= WUCSR_WAKE_EN_;
3104
3105 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3106 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3107 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3108 WUF_CFGX_EN_ |
3109 WUF_CFGX_TYPE_MCAST_ |
3110 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3111 (crc & WUF_CFGX_CRC16_MASK_));
3112
3113 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3114 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3115 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3116 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3117 mask_index++;
3118
3119 /* for IPv6 Multicast */
3120 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3121 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3122 WUF_CFGX_EN_ |
3123 WUF_CFGX_TYPE_MCAST_ |
3124 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3125 (crc & WUF_CFGX_CRC16_MASK_));
3126
3127 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3128 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3129 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3130 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3131 mask_index++;
3132
3133 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3134 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3135 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3136 }
3137 if (wol & WAKE_UCAST) {
3138 temp_wucsr |= WUCSR_PFDA_EN_;
3139
3140 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3141 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3142 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3143 }
3144 if (wol & WAKE_ARP) {
3145 temp_wucsr |= WUCSR_WAKE_EN_;
3146
3147 /* set WUF_CFG & WUF_MASK
3148		 * for packet type (offset 12,13) = ARP (0x0806)
3149 */
3150 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3151 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3152 WUF_CFGX_EN_ |
3153 WUF_CFGX_TYPE_ALL_ |
3154 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3155 (crc & WUF_CFGX_CRC16_MASK_));
3156
3157 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3158 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3159 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3160 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3161 mask_index++;
3162
3163 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3164 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3165 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3166 }
3167
3168 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3169
3170 /* when multiple WOL bits are set */
3171 if (hweight_long((unsigned long)wol) > 1) {
3172 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3173 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3174 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3175 }
3176 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3177
3178 /* clear WUPS */
3179 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3180 buf |= PMT_CTL_WUPS_MASK_;
3181 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3182
3183 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3184 buf |= MAC_RX_RXEN_;
3185 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3186
3187 return 0;
3188}
3189
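/* USB suspend callback.  The first suspend stops the MAC, unlinks all URBs
 * and detaches the netdev; a runtime (auto)suspend then arms PHY and
 * "good frame" wake, while a system suspend programs the user's WoL
 * configuration via lan78xx_set_suspend().
 */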
3190int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3191{
3192 struct lan78xx_net *dev = usb_get_intfdata(intf);
3193 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3194 u32 buf;
3195 int ret;
3196 int event;
3197
3198	event = message.event;
3199
3200 if (!dev->suspend_count++) {
3201 spin_lock_irq(&dev->txq.lock);
3202 /* don't autosuspend while transmitting */
3203 if ((skb_queue_len(&dev->txq) ||
3204 skb_queue_len(&dev->txq_pend)) &&
3205 PMSG_IS_AUTO(message)) {
3206 spin_unlock_irq(&dev->txq.lock);
3207 ret = -EBUSY;
3208 goto out;
3209 } else {
3210 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3211 spin_unlock_irq(&dev->txq.lock);
3212 }
3213
3214 /* stop TX & RX */
3215 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3216 buf &= ~MAC_TX_TXEN_;
3217 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3218 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3219 buf &= ~MAC_RX_RXEN_;
3220 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3221
3222		/* empty out the rx and tx queues */
3223 netif_device_detach(dev->net);
3224 lan78xx_terminate_urbs(dev);
3225 usb_kill_urb(dev->urb_intr);
3226
3227 /* reattach */
3228 netif_device_attach(dev->net);
3229 }
3230
3231 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3232 if (PMSG_IS_AUTO(message)) {
3233 /* auto suspend (selective suspend) */
3234 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3235 buf &= ~MAC_TX_TXEN_;
3236 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3237 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3238 buf &= ~MAC_RX_RXEN_;
3239 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3240
3241 ret = lan78xx_write_reg(dev, WUCSR, 0);
3242 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3243 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3244
3245 /* set goodframe wakeup */
3246 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3247
3248 buf |= WUCSR_RFE_WAKE_EN_;
3249 buf |= WUCSR_STORE_WAKE_;
3250
3251 ret = lan78xx_write_reg(dev, WUCSR, buf);
3252
3253 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3254
3255 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3256 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3257
3258 buf |= PMT_CTL_PHY_WAKE_EN_;
3259 buf |= PMT_CTL_WOL_EN_;
3260 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3261 buf |= PMT_CTL_SUS_MODE_3_;
3262
3263 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3264
3265 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3266
3267 buf |= PMT_CTL_WUPS_MASK_;
3268
3269 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3270
3271 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3272 buf |= MAC_RX_RXEN_;
3273 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3274 } else {
3275 lan78xx_set_suspend(dev, pdata->wol);
3276 }
3277 }
3278
3279	ret = 0;
3280out:
3281 return ret;
3282}
3283
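/* USB resume callback: resubmit the interrupt URB, flush any transmissions
 * that were deferred while autosuspended, clear the wake-up status
 * registers and turn the transmitter back on.
 */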
3284int lan78xx_resume(struct usb_interface *intf)
3285{
3286 struct lan78xx_net *dev = usb_get_intfdata(intf);
3287 struct sk_buff *skb;
3288 struct urb *res;
3289 int ret;
3290 u32 buf;
3291
3292 if (!--dev->suspend_count) {
3293 /* resume interrupt URBs */
3294 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3295 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3296
3297 spin_lock_irq(&dev->txq.lock);
3298 while ((res = usb_get_from_anchor(&dev->deferred))) {
3299 skb = (struct sk_buff *)res->context;
3300 ret = usb_submit_urb(res, GFP_ATOMIC);
3301 if (ret < 0) {
3302 dev_kfree_skb_any(skb);
3303 usb_free_urb(res);
3304 usb_autopm_put_interface_async(dev->intf);
3305 } else {
3306 dev->net->trans_start = jiffies;
3307 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3308 }
3309 }
3310
3311 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3312 spin_unlock_irq(&dev->txq.lock);
3313
3314 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3315 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3316 netif_start_queue(dev->net);
3317 tasklet_schedule(&dev->bh);
3318 }
3319 }
3320
3321 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3322 ret = lan78xx_write_reg(dev, WUCSR, 0);
3323 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3324
3325 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3326 WUCSR2_ARP_RCD_ |
3327 WUCSR2_IPV6_TCPSYN_RCD_ |
3328 WUCSR2_IPV4_TCPSYN_RCD_);
3329
3330 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3331 WUCSR_EEE_RX_WAKE_ |
3332 WUCSR_PFDA_FR_ |
3333 WUCSR_RFE_WAKE_FR_ |
3334 WUCSR_WUFR_ |
3335 WUCSR_MPR_ |
3336 WUCSR_BCST_FR_);
3337
3338 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3339 buf |= MAC_TX_TXEN_;
3340 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3341
3342 return 0;
3343}
3344
3345int lan78xx_reset_resume(struct usb_interface *intf)
3346{
3347 struct lan78xx_net *dev = usb_get_intfdata(intf);
3348
3349 lan78xx_reset(dev);
3350
3351 lan78xx_phy_init(dev);
3352
3353	return lan78xx_resume(intf);
3354}
3355
3356static const struct usb_device_id products[] = {
3357 {
3358 /* LAN7800 USB Gigabit Ethernet Device */
3359 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3360 },
3361 {
3362 /* LAN7850 USB Gigabit Ethernet Device */
3363 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3364 },
3365 {},
3366};
3367MODULE_DEVICE_TABLE(usb, products);
3368
3369static struct usb_driver lan78xx_driver = {
3370 .name = DRIVER_NAME,
3371 .id_table = products,
3372 .probe = lan78xx_probe,
3373 .disconnect = lan78xx_disconnect,
3374 .suspend = lan78xx_suspend,
3375 .resume = lan78xx_resume,
3376 .reset_resume = lan78xx_reset_resume,
3377 .supports_autosuspend = 1,
3378 .disable_hub_initiated_lpm = 1,
3379};
3380
3381module_usb_driver(lan78xx_driver);
3382
3383MODULE_AUTHOR(DRIVER_AUTHOR);
3384MODULE_DESCRIPTION(DRIVER_DESC);
3385MODULE_LICENSE("GPL");