blob: 5d9c527356a2eccc5d5f795df0ef0d682344570d [file] [log] [blame]
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +000022#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
33#include "lan78xx.h"
34
35#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
36#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
37#define DRIVER_NAME "lan78xx"
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +000038#define DRIVER_VERSION "1.0.1"
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +000039
40#define TX_TIMEOUT_JIFFIES (5 * HZ)
41#define THROTTLE_JIFFIES (HZ / 8)
42#define UNLINK_TIMEOUT_MS 3
43
44#define RX_MAX_QUEUE_MEMORY (60 * 1518)
45
46#define SS_USB_PKT_SIZE (1024)
47#define HS_USB_PKT_SIZE (512)
48#define FS_USB_PKT_SIZE (64)
49
50#define MAX_RX_FIFO_SIZE (12 * 1024)
51#define MAX_TX_FIFO_SIZE (12 * 1024)
52#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
53#define DEFAULT_BULK_IN_DELAY (0x0800)
54#define MAX_SINGLE_PACKET_SIZE (9000)
55#define DEFAULT_TX_CSUM_ENABLE (true)
56#define DEFAULT_RX_CSUM_ENABLE (true)
57#define DEFAULT_TSO_CSUM_ENABLE (true)
58#define DEFAULT_VLAN_FILTER_ENABLE (true)
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +000059#define TX_OVERHEAD (8)
60#define RXW_PADDING 2
61
62#define LAN78XX_USB_VENDOR_ID (0x0424)
63#define LAN7800_USB_PRODUCT_ID (0x7800)
64#define LAN7850_USB_PRODUCT_ID (0x7850)
65#define LAN78XX_EEPROM_MAGIC (0x78A5)
66#define LAN78XX_OTP_MAGIC (0x78F3)
67
68#define MII_READ 1
69#define MII_WRITE 0
70
71#define EEPROM_INDICATOR (0xA5)
72#define EEPROM_MAC_OFFSET (0x01)
73#define MAX_EEPROM_SIZE 512
74#define OTP_INDICATOR_1 (0xF3)
75#define OTP_INDICATOR_2 (0xF7)
76
77#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
78 WAKE_MCAST | WAKE_BCAST | \
79 WAKE_ARP | WAKE_MAGIC)
80
81/* USB related defines */
82#define BULK_IN_PIPE 1
83#define BULK_OUT_PIPE 2
84
85/* default autosuspend delay (mSec)*/
86#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
87
/* ethtool statistics names, reported via get_strings/get_ethtool_stats.
 * Order must match the field order of struct lan78xx_statstage exactly,
 * since get_stats copies that struct out word-for-word.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
137
/* Hardware statistics block as returned by USB_VENDOR_REQUEST_GET_STATS
 * (see lan78xx_read_stats). Field order must mirror lan78xx_gstrings;
 * every field is a little-endian u32 on the wire, byte-swapped on read.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
187
188struct lan78xx_net;
189
/* Driver-private state, reached from lan78xx_net via dev->data[0]. */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;		/* cached copy of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast filter write */
	struct work_struct set_vlan;	  /* deferred VLAN table write */
	u32 wol;		/* enabled WAKE_* wake-on-LAN options */
};
202
/* Lifecycle state of an skb in flight, recorded in skb_data (skb->cb). */
enum skb_state {
	illegal = 0,	/* never a valid state; catches uninitialized cb */
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
212
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb, if any */
	struct lan78xx_net *dev;
	enum skb_state state;	/* position in the tx/rx lifecycle above */
	size_t length;
};
219
/* Pairs a USB control request with the device it targets. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
224
225#define EVENT_TX_HALT 0
226#define EVENT_RX_HALT 1
227#define EVENT_RX_MEMORY 2
228#define EVENT_STS_SPLIT 3
229#define EVENT_LINK_RESET 4
230#define EVENT_RX_PAUSED 5
231#define EVENT_DEV_WAKING 6
232#define EVENT_DEV_ASLEEP 7
233#define EVENT_DEV_OPEN 8
234
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;		/* deferred EVENT_* work (see flags) */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;	/* EVENT_* bits, see lan78xx_defer_kevent() */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;

	/* data[0] holds the struct lan78xx_priv pointer (cast on use) */
	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 devid;
	struct mii_bus *mdiobus;
};
283
/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1 = do not override the driver default */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
288
289static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
290{
291 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
292 int ret;
293
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +0000294 if (!buf)
295 return -ENOMEM;
296
297 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
298 USB_VENDOR_REQUEST_READ_REGISTER,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
301 if (likely(ret >= 0)) {
302 le32_to_cpus(buf);
303 *data = *buf;
304 } else {
305 netdev_warn(dev->net,
306 "Failed to read register index 0x%08x. ret = %d",
307 index, ret);
308 }
309
310 kfree(buf);
311
312 return ret;
313}
314
315static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
316{
317 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
318 int ret;
319
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +0000320 if (!buf)
321 return -ENOMEM;
322
323 *buf = data;
324 cpu_to_le32s(buf);
325
326 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
327 USB_VENDOR_REQUEST_WRITE_REGISTER,
328 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
329 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
330 if (unlikely(ret < 0)) {
331 netdev_warn(dev->net,
332 "Failed to write register index 0x%08x. ret = %d",
333 index, ret);
334 }
335
336 kfree(buf);
337
338 return ret;
339}
340
341static int lan78xx_read_stats(struct lan78xx_net *dev,
342 struct lan78xx_statstage *data)
343{
344 int ret = 0;
345 int i;
346 struct lan78xx_statstage *stats;
347 u32 *src;
348 u32 *dst;
349
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +0000350 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
351 if (!stats)
352 return -ENOMEM;
353
354 ret = usb_control_msg(dev->udev,
355 usb_rcvctrlpipe(dev->udev, 0),
356 USB_VENDOR_REQUEST_GET_STATS,
357 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
358 0,
359 0,
360 (void *)stats,
361 sizeof(*stats),
362 USB_CTRL_SET_TIMEOUT);
363 if (likely(ret >= 0)) {
364 src = (u32 *)stats;
365 dst = (u32 *)data;
366 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
367 le32_to_cpus(&src[i]);
368 dst[i] = src[i];
369 }
370 } else {
371 netdev_warn(dev->net,
372 "Failed to read stat ret = 0x%x", ret);
373 }
374
375 kfree(stats);
376
377 return ret;
378}
379
380/* Loop until the read is completed with timeout called with phy_mutex held */
381static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
382{
383 unsigned long start_time = jiffies;
384 u32 val;
385 int ret;
386
387 do {
388 ret = lan78xx_read_reg(dev, MII_ACC, &val);
389 if (unlikely(ret < 0))
390 return -EIO;
391
392 if (!(val & MII_ACC_MII_BUSY_))
393 return 0;
394 } while (!time_after(jiffies, start_time + HZ));
395
396 return -EIO;
397}
398
399static inline u32 mii_access(int id, int index, int read)
400{
401 u32 ret;
402
403 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
404 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
405 if (read)
406 ret |= MII_ACC_MII_READ_;
407 else
408 ret |= MII_ACC_MII_WRITE_;
409 ret |= MII_ACC_MII_BUSY_;
410
411 return ret;
412}
413
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +0000414static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
415{
416 unsigned long start_time = jiffies;
417 u32 val;
418 int ret;
419
420 do {
421 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
422 if (unlikely(ret < 0))
423 return -EIO;
424
425 if (!(val & E2P_CMD_EPC_BUSY_) ||
426 (val & E2P_CMD_EPC_TIMEOUT_))
427 break;
428 usleep_range(40, 100);
429 } while (!time_after(jiffies, start_time + HZ));
430
431 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
432 netdev_warn(dev->net, "EEPROM read operation timeout");
433 return -EIO;
434 }
435
436 return 0;
437}
438
439static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
440{
441 unsigned long start_time = jiffies;
442 u32 val;
443 int ret;
444
445 do {
446 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
447 if (unlikely(ret < 0))
448 return -EIO;
449
450 if (!(val & E2P_CMD_EPC_BUSY_))
451 return 0;
452
453 usleep_range(40, 100);
454 } while (!time_after(jiffies, start_time + HZ));
455
456 netdev_warn(dev->net, "EEPROM is busy");
457 return -EIO;
458}
459
460static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
461 u32 length, u8 *data)
462{
463 u32 val;
464 int i, ret;
465
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +0000466 ret = lan78xx_eeprom_confirm_not_busy(dev);
467 if (ret)
468 return ret;
469
470 for (i = 0; i < length; i++) {
471 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
472 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
473 ret = lan78xx_write_reg(dev, E2P_CMD, val);
474 if (unlikely(ret < 0))
475 return -EIO;
476
477 ret = lan78xx_wait_eeprom(dev);
478 if (ret < 0)
479 return ret;
480
481 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
482 if (unlikely(ret < 0))
483 return -EIO;
484
485 data[i] = val & 0xFF;
486 offset++;
487 }
488
489 return 0;
490}
491
492static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
493 u32 length, u8 *data)
494{
495 u8 sig;
496 int ret;
497
498 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
499 if ((ret == 0) && (sig == EEPROM_INDICATOR))
500 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
501 else
502 ret = -EINVAL;
503
504 return ret;
505}
506
507static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
508 u32 length, u8 *data)
509{
510 u32 val;
511 int i, ret;
512
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +0000513 ret = lan78xx_eeprom_confirm_not_busy(dev);
514 if (ret)
515 return ret;
516
517 /* Issue write/erase enable command */
518 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
519 ret = lan78xx_write_reg(dev, E2P_CMD, val);
520 if (unlikely(ret < 0))
521 return -EIO;
522
523 ret = lan78xx_wait_eeprom(dev);
524 if (ret < 0)
525 return ret;
526
527 for (i = 0; i < length; i++) {
528 /* Fill data register */
529 val = data[i];
530 ret = lan78xx_write_reg(dev, E2P_DATA, val);
531 if (ret < 0)
532 return ret;
533
534 /* Send "write" command */
535 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
536 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
537 ret = lan78xx_write_reg(dev, E2P_CMD, val);
538 if (ret < 0)
539 return ret;
540
541 ret = lan78xx_wait_eeprom(dev);
542 if (ret < 0)
543 return ret;
544
545 offset++;
546 }
547
548 return 0;
549}
550
/* Read @length bytes starting at @offset from the on-chip OTP memory.
 * Powers the OTP block up first if it is held in power-down, then reads
 * one byte per READ/GO command pair, polling OTP_STATUS between bytes.
 * Returns 0 on success, -EIO on a poll timeout.
 * NOTE(review): the ret values from the intermediate register accesses
 * are assigned but not checked - an I/O failure here is only caught
 * indirectly via the timeout paths.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* program the byte address (split across two registers) */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* select the READ function and kick it off */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
604
605static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
606 u32 length, u8 *data)
607{
608 u8 sig;
609 int ret;
610
611 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
612
613 if (ret == 0) {
614 if (sig == OTP_INDICATOR_1)
615 offset = offset;
616 else if (sig == OTP_INDICATOR_2)
617 offset += 0x100;
618 else
619 ret = -EINVAL;
620 ret = lan78xx_read_raw_otp(dev, offset, length, data);
621 }
622
623 return ret;
624}
625
626static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
627{
628 int i, ret;
629
630 for (i = 0; i < 100; i++) {
631 u32 dp_sel;
632
633 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
634 if (unlikely(ret < 0))
635 return -EIO;
636
637 if (dp_sel & DP_SEL_DPRDY_)
638 return 0;
639
640 usleep_range(40, 100);
641 }
642
643 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
644
645 return -EIO;
646}
647
/* Write @length words from @buf into on-chip RAM selected by @ram_select,
 * starting at word address @addr, via the DP_* dataport registers.
 * Serialized by pdata->dataport_mutex; holds a USB autopm reference for
 * the duration. Returns the last register-operation status (note:
 * usb_control_msg-based writes return a positive byte count on success),
 * or 0 if the autopm reference could not be taken.
 * NOTE(review): returning 0 (success) when usb_autopm_get_interface()
 * fails silently skips the write - confirm this is intentional.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* point DP_SEL at the requested RAM bank, keeping other bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	/* one address/data/command cycle per word, waiting in between */
	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
688
689static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
690 int index, u8 addr[ETH_ALEN])
691{
692 u32 temp;
693
694 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
695 temp = addr[3];
696 temp = addr[2] | (temp << 8);
697 temp = addr[1] | (temp << 8);
698 temp = addr[0] | (temp << 8);
699 pdata->pfilter_table[index][1] = temp;
700 temp = addr[5];
701 temp = addr[4] | (temp << 8);
702 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
703 pdata->pfilter_table[index][0] = temp;
704 }
705}
706
707/* returns hash bit number for given MAC address */
708static inline u32 lan78xx_hash(char addr[ETH_ALEN])
709{
710 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
711}
712
713static void lan78xx_deferred_multicast_write(struct work_struct *param)
714{
715 struct lan78xx_priv *pdata =
716 container_of(param, struct lan78xx_priv, set_multicast);
717 struct lan78xx_net *dev = pdata->dev;
718 int i;
719 int ret;
720
721 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
722 pdata->rfe_ctl);
723
724 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
725 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
726
727 for (i = 1; i < NUM_OF_MAF; i++) {
728 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
729 ret = lan78xx_write_reg(dev, MAF_LO(i),
730 pdata->pfilter_table[i][1]);
731 ret = lan78xx_write_reg(dev, MAF_HI(i),
732 pdata->pfilter_table[i][0]);
733 }
734
735 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
736}
737
738static void lan78xx_set_multicast(struct net_device *netdev)
739{
740 struct lan78xx_net *dev = netdev_priv(netdev);
741 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
742 unsigned long flags;
743 int i;
744
745 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
746
747 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
748 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
749
750 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
751 pdata->mchash_table[i] = 0;
752 /* pfilter_table[0] has own HW address */
753 for (i = 1; i < NUM_OF_MAF; i++) {
754 pdata->pfilter_table[i][0] =
755 pdata->pfilter_table[i][1] = 0;
756 }
757
758 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
759
760 if (dev->net->flags & IFF_PROMISC) {
761 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
762 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
763 } else {
764 if (dev->net->flags & IFF_ALLMULTI) {
765 netif_dbg(dev, drv, dev->net,
766 "receive all multicast enabled");
767 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
768 }
769 }
770
771 if (netdev_mc_count(dev->net)) {
772 struct netdev_hw_addr *ha;
773 int i;
774
775 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
776
777 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
778
779 i = 1;
780 netdev_for_each_mc_addr(ha, netdev) {
781 /* set first 32 into Perfect Filter */
782 if (i < 33) {
783 lan78xx_set_addr_filter(pdata, i, ha->addr);
784 } else {
785 u32 bitnum = lan78xx_hash(ha->addr);
786
787 pdata->mchash_table[bitnum / 32] |=
788 (1 << (bitnum % 32));
789 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
790 }
791 i++;
792 }
793 }
794
795 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
796
797 /* defer register writes to a sleepable context */
798 schedule_work(&pdata->set_multicast);
799}
800
801static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
802 u16 lcladv, u16 rmtadv)
803{
804 u32 flow = 0, fct_flow = 0;
805 int ret;
806
807 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
808
809 if (cap & FLOW_CTRL_TX)
810 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
811
812 if (cap & FLOW_CTRL_RX)
813 flow |= FLOW_CR_RX_FCEN_;
814
815 if (dev->udev->speed == USB_SPEED_SUPER)
816 fct_flow = 0x817;
817 else if (dev->udev->speed == USB_SPEED_HIGH)
818 fct_flow = 0x211;
819
820 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
821 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
822 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
823
824 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
825
826 /* threshold value should be set before enabling flow */
827 ret = lan78xx_write_reg(dev, FLOW, flow);
828
829 return 0;
830}
831
/* Handle a PHY link-change event (scheduled via EVENT_LINK_RESET).
 * Acknowledges the PHY and chip interrupt sources, then on link-down
 * resets the MAC; on link-up reads the negotiated parameters, tunes
 * USB3 U1/U2 power states, and programs flow control.
 * Returns 0 or a negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	/* VTSE PHY */
	ret = phy_read(phydev, PHY_VTSE_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		/* re-read to clear any latched PHY interrupt */
		ret = phy_read(phydev, PHY_VTSE_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);
	}

	return ret;
}
908
909/* some work can't be done in tasklets, so we use keventd
910 *
911 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
912 * but tasklet_schedule() doesn't. hope the failure is rare.
913 */
914void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
915{
916 set_bit(work, &dev->flags);
917 if (!schedule_delayed_work(&dev->wq, 0))
918 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
919}
920
921static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
922{
923 u32 intdata;
924
925 if (urb->actual_length != 4) {
926 netdev_warn(dev->net,
927 "unexpected urb length %d", urb->actual_length);
928 return;
929 }
930
931 memcpy(&intdata, urb->transfer_buffer, 4);
932 le32_to_cpus(&intdata);
933
934 if (intdata & INT_ENP_PHY_INT) {
935 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
936 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
937 } else
938 netdev_warn(dev->net,
939 "unexpected interrupt: 0x%08x\n", intdata);
940}
941
/* ethtool: size of the EEPROM exposed through get/set_eeprom */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
946
947static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
948 struct ethtool_eeprom *ee, u8 *data)
949{
950 struct lan78xx_net *dev = netdev_priv(netdev);
951
952 ee->magic = LAN78XX_EEPROM_MAGIC;
953
954 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
955}
956
957static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
958 struct ethtool_eeprom *ee, u8 *data)
959{
960 struct lan78xx_net *dev = netdev_priv(netdev);
961
962 /* Allow entire eeprom update only */
963 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
964 (ee->offset == 0) &&
965 (ee->len == 512) &&
966 (data[0] == EEPROM_INDICATOR))
967 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
968 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
969 (ee->offset == 0) &&
970 (ee->len == 512) &&
971 (data[0] == OTP_INDICATOR_1))
972 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
973
974 return -EINVAL;
975}
976
977static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
978 u8 *data)
979{
980 if (stringset == ETH_SS_STATS)
981 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
982}
983
984static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
985{
986 if (sset == ETH_SS_STATS)
987 return ARRAY_SIZE(lan78xx_gstrings);
988 else
989 return -EOPNOTSUPP;
990}
991
992static void lan78xx_get_stats(struct net_device *netdev,
993 struct ethtool_stats *stats, u64 *data)
994{
995 struct lan78xx_net *dev = netdev_priv(netdev);
996 struct lan78xx_statstage lan78xx_stat;
997 u32 *p;
998 int i;
999
1000 if (usb_autopm_get_interface(dev->intf) < 0)
1001 return;
1002
1003 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1004 p = (u32 *)&lan78xx_stat;
1005 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1006 data[i] = p[i];
1007 }
1008
1009 usb_autopm_put_interface(dev->intf);
1010}
1011
1012static void lan78xx_get_wol(struct net_device *netdev,
1013 struct ethtool_wolinfo *wol)
1014{
1015 struct lan78xx_net *dev = netdev_priv(netdev);
1016 int ret;
1017 u32 buf;
1018 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1019
1020 if (usb_autopm_get_interface(dev->intf) < 0)
1021 return;
1022
1023 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1024 if (unlikely(ret < 0)) {
1025 wol->supported = 0;
1026 wol->wolopts = 0;
1027 } else {
1028 if (buf & USB_CFG_RMT_WKP_) {
1029 wol->supported = WAKE_ALL;
1030 wol->wolopts = pdata->wol;
1031 } else {
1032 wol->supported = 0;
1033 wol->wolopts = 0;
1034 }
1035 }
1036
1037 usb_autopm_put_interface(dev->intf);
1038}
1039
1040static int lan78xx_set_wol(struct net_device *netdev,
1041 struct ethtool_wolinfo *wol)
1042{
1043 struct lan78xx_net *dev = netdev_priv(netdev);
1044 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1045 int ret;
1046
1047 ret = usb_autopm_get_interface(dev->intf);
1048 if (ret < 0)
1049 return ret;
1050
1051 pdata->wol = 0;
1052 if (wol->wolopts & WAKE_UCAST)
1053 pdata->wol |= WAKE_UCAST;
1054 if (wol->wolopts & WAKE_MCAST)
1055 pdata->wol |= WAKE_MCAST;
1056 if (wol->wolopts & WAKE_BCAST)
1057 pdata->wol |= WAKE_BCAST;
1058 if (wol->wolopts & WAKE_MAGIC)
1059 pdata->wol |= WAKE_MAGIC;
1060 if (wol->wolopts & WAKE_PHY)
1061 pdata->wol |= WAKE_PHY;
1062 if (wol->wolopts & WAKE_ARP)
1063 pdata->wol |= WAKE_ARP;
1064
1065 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1066
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001067 phy_ethtool_set_wol(netdev->phydev, wol);
1068
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001069 usb_autopm_put_interface(dev->intf);
1070
1071 return ret;
1072}
1073
/* ethtool get_eee: let the PHY fill in the advertisement fields, then
 * overlay the MAC's view (MAC_CR EEE enable bit and the LPI request
 * delay register). Returns 0 or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both sides advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1111
1112static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1113{
1114 struct lan78xx_net *dev = netdev_priv(net);
1115 int ret;
1116 u32 buf;
1117
1118 ret = usb_autopm_get_interface(dev->intf);
1119 if (ret < 0)
1120 return ret;
1121
1122 if (edata->eee_enabled) {
1123 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1124 buf |= MAC_CR_EEE_EN_;
1125 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1126
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001127 phy_ethtool_set_eee(net->phydev, edata);
1128
1129 buf = (u32)edata->tx_lpi_timer;
1130 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001131 } else {
1132 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1133 buf &= ~MAC_CR_EEE_EN_;
1134 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1135 }
1136
1137 usb_autopm_put_interface(dev->intf);
1138
1139 return 0;
1140}
1141
1142static u32 lan78xx_get_link(struct net_device *net)
1143{
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001144 phy_read_status(net->phydev);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001145
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001146 return net->phydev->link;
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001147}
1148
1149int lan78xx_nway_reset(struct net_device *net)
1150{
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001151 return phy_start_aneg(net->phydev);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001152}
1153
1154static void lan78xx_get_drvinfo(struct net_device *net,
1155 struct ethtool_drvinfo *info)
1156{
1157 struct lan78xx_net *dev = netdev_priv(net);
1158
1159 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1160 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1161 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1162}
1163
1164static u32 lan78xx_get_msglevel(struct net_device *net)
1165{
1166 struct lan78xx_net *dev = netdev_priv(net);
1167
1168 return dev->msg_enable;
1169}
1170
1171static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1172{
1173 struct lan78xx_net *dev = netdev_priv(net);
1174
1175 dev->msg_enable = level;
1176}
1177
1178static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1179{
1180 struct lan78xx_net *dev = netdev_priv(net);
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001181 struct phy_device *phydev = net->phydev;
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001182 int ret;
1183 int buf;
1184
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001185 ret = usb_autopm_get_interface(dev->intf);
1186 if (ret < 0)
1187 return ret;
1188
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001189 ret = phy_ethtool_gset(phydev, cmd);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001190
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001191 phy_write(phydev, PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
1192 buf = phy_read(phydev, PHY_EXT_MODE_CTRL);
1193 phy_write(phydev, PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001194
1195 buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
1196 if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
1197 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1198 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1199 } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
1200 cmd->eth_tp_mdix = ETH_TP_MDI;
1201 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1202 } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
1203 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1204 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1205 }
1206
1207 usb_autopm_put_interface(dev->intf);
1208
1209 return ret;
1210}
1211
1212static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1213{
1214 struct lan78xx_net *dev = netdev_priv(net);
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001215 struct phy_device *phydev = net->phydev;
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001216 int ret = 0;
1217 int temp;
1218
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001219 ret = usb_autopm_get_interface(dev->intf);
1220 if (ret < 0)
1221 return ret;
1222
1223 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1224 if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001225 phy_write(phydev, PHY_EXT_GPIO_PAGE,
1226 PHY_EXT_GPIO_PAGE_SPACE_1);
1227 temp = phy_read(phydev, PHY_EXT_MODE_CTRL);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001228 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001229 phy_write(phydev, PHY_EXT_MODE_CTRL,
1230 temp | PHY_EXT_MODE_CTRL_MDI_);
1231 phy_write(phydev, PHY_EXT_GPIO_PAGE,
1232 PHY_EXT_GPIO_PAGE_SPACE_0);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001233 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001234 phy_write(phydev, PHY_EXT_GPIO_PAGE,
1235 PHY_EXT_GPIO_PAGE_SPACE_1);
1236 temp = phy_read(phydev, PHY_EXT_MODE_CTRL);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001237 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001238 phy_write(phydev, PHY_EXT_MODE_CTRL,
1239 temp | PHY_EXT_MODE_CTRL_MDI_X_);
1240 phy_write(phydev, PHY_EXT_GPIO_PAGE,
1241 PHY_EXT_GPIO_PAGE_SPACE_0);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001242 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001243 phy_write(phydev, PHY_EXT_GPIO_PAGE,
1244 PHY_EXT_GPIO_PAGE_SPACE_1);
1245 temp = phy_read(phydev, PHY_EXT_MODE_CTRL);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001246 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001247 phy_write(phydev, PHY_EXT_MODE_CTRL,
1248 temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1249 phy_write(phydev, PHY_EXT_GPIO_PAGE,
1250 PHY_EXT_GPIO_PAGE_SPACE_0);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001251 }
1252 }
1253
1254 /* change speed & duplex */
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001255 ret = phy_ethtool_sset(phydev, cmd);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001256
1257 if (!cmd->autoneg) {
1258 /* force link down */
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001259 temp = phy_read(phydev, MII_BMCR);
1260 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001261 mdelay(1);
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001262 phy_write(phydev, MII_BMCR, temp);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001263 }
1264
1265 usb_autopm_put_interface(dev->intf);
1266
1267 return ret;
1268}
1269
/* ethtool entry points for lan78xx network devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link = lan78xx_get_link,
	.nway_reset = lan78xx_nway_reset,
	.get_drvinfo = lan78xx_get_drvinfo,
	.get_msglevel = lan78xx_get_msglevel,
	.set_msglevel = lan78xx_set_msglevel,
	.get_settings = lan78xx_get_settings,
	.set_settings = lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom = lan78xx_ethtool_get_eeprom,
	.set_eeprom = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings = lan78xx_get_strings,
	.get_wol = lan78xx_get_wol,
	.set_wol = lan78xx_set_wol,
	.get_eee = lan78xx_get_eee,
	.set_eee = lan78xx_set_eee,
};
1289
1290static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1291{
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001292 if (!netif_running(netdev))
1293 return -EINVAL;
1294
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001295 return phy_mii_ioctl(netdev->phydev, rq, cmd);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001296}
1297
1298static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1299{
1300 u32 addr_lo, addr_hi;
1301 int ret;
1302 u8 addr[6];
1303
1304 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1305 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1306
1307 addr[0] = addr_lo & 0xFF;
1308 addr[1] = (addr_lo >> 8) & 0xFF;
1309 addr[2] = (addr_lo >> 16) & 0xFF;
1310 addr[3] = (addr_lo >> 24) & 0xFF;
1311 addr[4] = addr_hi & 0xFF;
1312 addr[5] = (addr_hi >> 8) & 0xFF;
1313
1314 if (!is_valid_ether_addr(addr)) {
1315 /* reading mac address from EEPROM or OTP */
1316 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1317 addr) == 0) ||
1318 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1319 addr) == 0)) {
1320 if (is_valid_ether_addr(addr)) {
1321 /* eeprom values are valid so use them */
1322 netif_dbg(dev, ifup, dev->net,
1323 "MAC address read from EEPROM");
1324 } else {
1325 /* generate random MAC */
1326 random_ether_addr(addr);
1327 netif_dbg(dev, ifup, dev->net,
1328 "MAC address set to random addr");
1329 }
1330
1331 addr_lo = addr[0] | (addr[1] << 8) |
1332 (addr[2] << 16) | (addr[3] << 24);
1333 addr_hi = addr[4] | (addr[5] << 8);
1334
1335 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1336 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1337 } else {
1338 /* generate random MAC */
1339 random_ether_addr(addr);
1340 netif_dbg(dev, ifup, dev->net,
1341 "MAC address set to random addr");
1342 }
1343 }
1344
1345 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1346 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1347
1348 ether_addr_copy(dev->net->dev_addr, addr);
1349}
1350
/* MDIO read and write wrappers for phylib */

/* Read one PHY register over the chip's MII interface.
 * Returns the 16-bit register value, or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	/* wake the USB interface; MII access needs register I/O */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* serialize MII access against the write path */
	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* NOTE(review): the MII_ACC write and MII_DATA read results are
	 * overwritten without being checked — confirm lan78xx_read_reg
	 * leaves 'val' untouched on failure.
	 */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1386
1387static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1388 u16 regval)
1389{
1390 struct lan78xx_net *dev = bus->priv;
1391 u32 val, addr;
1392 int ret;
1393
1394 ret = usb_autopm_get_interface(dev->intf);
1395 if (ret < 0)
1396 return ret;
1397
1398 mutex_lock(&dev->phy_mutex);
1399
1400 /* confirm MII not busy */
1401 ret = lan78xx_phy_wait_not_busy(dev);
1402 if (ret < 0)
1403 goto done;
1404
1405 val = (u32)regval;
1406 ret = lan78xx_write_reg(dev, MII_DATA, val);
1407
1408 /* set the address, index & direction (write to PHY) */
1409 addr = mii_access(phy_id, idx, MII_WRITE);
1410 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1411
1412 ret = lan78xx_phy_wait_not_busy(dev);
1413 if (ret < 0)
1414 goto done;
1415
1416done:
1417 mutex_unlock(&dev->phy_mutex);
1418 usb_autopm_put_interface(dev->intf);
1419 return 0;
1420}
1421
1422static int lan78xx_mdio_init(struct lan78xx_net *dev)
1423{
1424 int ret;
1425 int i;
1426
1427 dev->mdiobus = mdiobus_alloc();
1428 if (!dev->mdiobus) {
1429 netdev_err(dev->net, "can't allocate MDIO bus\n");
1430 return -ENOMEM;
1431 }
1432
1433 dev->mdiobus->priv = (void *)dev;
1434 dev->mdiobus->read = lan78xx_mdiobus_read;
1435 dev->mdiobus->write = lan78xx_mdiobus_write;
1436 dev->mdiobus->name = "lan78xx-mdiobus";
1437
1438 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1439 dev->udev->bus->busnum, dev->udev->devnum);
1440
1441 dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1442 if (!dev->mdiobus->irq) {
1443 ret = -ENOMEM;
1444 goto exit1;
1445 }
1446
1447 /* handle our own interrupt */
1448 for (i = 0; i < PHY_MAX_ADDR; i++)
1449 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1450
1451 switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1452 case 0x78000000:
1453 case 0x78500000:
1454 /* set to internal PHY id */
1455 dev->mdiobus->phy_mask = ~(1 << 1);
1456 break;
1457 }
1458
1459 ret = mdiobus_register(dev->mdiobus);
1460 if (ret) {
1461 netdev_err(dev->net, "can't register MDIO bus\n");
1462 goto exit2;
1463 }
1464
1465 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1466 return 0;
1467exit2:
1468 kfree(dev->mdiobus->irq);
1469exit1:
1470 mdiobus_free(dev->mdiobus);
1471 return ret;
1472}
1473
/* Tear down the MDIO bus created by lan78xx_mdio_init(), in reverse
 * order of acquisition.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	kfree(dev->mdiobus->irq);
	mdiobus_free(dev->mdiobus);
}
1480
/* phylib link-change callback, registered in lan78xx_phy_init();
 * intentionally empty.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1485
1486static int lan78xx_phy_init(struct lan78xx_net *dev)
1487{
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001488 int ret;
1489 struct phy_device *phydev = dev->net->phydev;
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001490
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001491 phydev = phy_find_first(dev->mdiobus);
1492 if (!phydev) {
1493 netdev_err(dev->net, "no PHY found\n");
1494 return -EIO;
1495 }
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001496
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001497 ret = phy_connect_direct(dev->net, phydev,
1498 lan78xx_link_status_change,
1499 PHY_INTERFACE_MODE_GMII);
1500 if (ret) {
1501 netdev_err(dev->net, "can't attach PHY to %s\n",
1502 dev->mdiobus->id);
1503 return -EIO;
1504 }
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001505
1506 /* set to AUTOMDIX */
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001507 phy_write(phydev, PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
1508 ret = phy_read(phydev, PHY_EXT_MODE_CTRL);
1509 ret &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1510 phy_write(phydev, PHY_EXT_MODE_CTRL,
1511 ret | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1512 phy_write(phydev, PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001513 dev->mdix_ctrl = ETH_TP_MDI_AUTO;
1514
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001515 /* MAC doesn't support 1000T Half */
1516 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1517 phydev->supported |= (SUPPORTED_10baseT_Half |
1518 SUPPORTED_10baseT_Full |
1519 SUPPORTED_100baseT_Half |
1520 SUPPORTED_100baseT_Full |
1521 SUPPORTED_1000baseT_Full |
1522 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1523 genphy_config_aneg(phydev);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001524
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00001525 /* Workaround to enable PHY interrupt.
1526 * phy_start_interrupts() is API for requesting and enabling
1527 * PHY interrupt. However, USB-to-Ethernet device can't use
1528 * request_irq() called in phy_start_interrupts().
1529 * Set PHY to PHY_HALTED and call phy_start()
1530 * to make a call to phy_enable_interrupts()
1531 */
1532 phy_stop(phydev);
1533 phy_start(phydev);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00001534
1535 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1536
1537 return 0;
1538}
1539
1540static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1541{
1542 int ret = 0;
1543 u32 buf;
1544 bool rxenabled;
1545
1546 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1547
1548 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1549
1550 if (rxenabled) {
1551 buf &= ~MAC_RX_RXEN_;
1552 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1553 }
1554
1555 /* add 4 to size for FCS */
1556 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1557 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1558
1559 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1560
1561 if (rxenabled) {
1562 buf |= MAC_RX_RXEN_;
1563 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1564 }
1565
1566 return 0;
1567}
1568
/* Start an asynchronous unlink for every URB on @q that is not already
 * being unlinked. Returns the number of unlinks successfully initiated.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* restart the walk on every pass: the lock is dropped
		 * below, so the queue may change under us
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1613
1614static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1615{
1616 struct lan78xx_net *dev = netdev_priv(netdev);
1617 int ll_mtu = new_mtu + netdev->hard_header_len;
1618 int old_hard_mtu = dev->hard_mtu;
1619 int old_rx_urb_size = dev->rx_urb_size;
1620 int ret;
1621
1622 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1623 return -EINVAL;
1624
1625 if (new_mtu <= 0)
1626 return -EINVAL;
1627 /* no second zero-length packet read wanted after mtu-sized packets */
1628 if ((ll_mtu % dev->maxpacket) == 0)
1629 return -EDOM;
1630
1631 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1632
1633 netdev->mtu = new_mtu;
1634
1635 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1636 if (dev->rx_urb_size == old_hard_mtu) {
1637 dev->rx_urb_size = dev->hard_mtu;
1638 if (dev->rx_urb_size > old_rx_urb_size) {
1639 if (netif_running(dev->net)) {
1640 unlink_urbs(dev, &dev->rxq);
1641 tasklet_schedule(&dev->bh);
1642 }
1643 }
1644 }
1645
1646 return 0;
1647}
1648
1649int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1650{
1651 struct lan78xx_net *dev = netdev_priv(netdev);
1652 struct sockaddr *addr = p;
1653 u32 addr_lo, addr_hi;
1654 int ret;
1655
1656 if (netif_running(netdev))
1657 return -EBUSY;
1658
1659 if (!is_valid_ether_addr(addr->sa_data))
1660 return -EADDRNOTAVAIL;
1661
1662 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1663
1664 addr_lo = netdev->dev_addr[0] |
1665 netdev->dev_addr[1] << 8 |
1666 netdev->dev_addr[2] << 16 |
1667 netdev->dev_addr[3] << 24;
1668 addr_hi = netdev->dev_addr[4] |
1669 netdev->dev_addr[5] << 8;
1670
1671 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1672 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1673
1674 return 0;
1675}
1676
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	/* rfe_ctl is shared with other RFE updaters; modify under lock */
	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* register write happens outside the spinlock (USB I/O);
	 * NOTE(review): its result is ignored — confirm intentional
	 */
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
1707
/* Worker for deferred VLAN filter-table updates; runs in sleepable
 * context because the dataport write performs USB register I/O.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
1717
1718static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1719 __be16 proto, u16 vid)
1720{
1721 struct lan78xx_net *dev = netdev_priv(netdev);
1722 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1723 u16 vid_bit_index;
1724 u16 vid_dword_index;
1725
1726 vid_dword_index = (vid >> 5) & 0x7F;
1727 vid_bit_index = vid & 0x1F;
1728
1729 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1730
1731 /* defer register writes to a sleepable context */
1732 schedule_work(&pdata->set_vlan);
1733
1734 return 0;
1735}
1736
1737static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1738 __be16 proto, u16 vid)
1739{
1740 struct lan78xx_net *dev = netdev_priv(netdev);
1741 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1742 u16 vid_bit_index;
1743 u16 vid_dword_index;
1744
1745 vid_dword_index = (vid >> 5) & 0x7F;
1746 vid_bit_index = vid & 0x1F;
1747
1748 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1749
1750 /* defer register writes to a sleepable context */
1751 schedule_work(&pdata->set_vlan);
1752
1753 return 0;
1754}
1755
1756static void lan78xx_init_ltm(struct lan78xx_net *dev)
1757{
1758 int ret;
1759 u32 buf;
1760 u32 regs[6] = { 0 };
1761
1762 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1763 if (buf & USB_CFG1_LTM_ENABLE_) {
1764 u8 temp[2];
1765 /* Get values from EEPROM first */
1766 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1767 if (temp[0] == 24) {
1768 ret = lan78xx_read_raw_eeprom(dev,
1769 temp[1] * 2,
1770 24,
1771 (u8 *)regs);
1772 if (ret < 0)
1773 return;
1774 }
1775 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1776 if (temp[0] == 24) {
1777 ret = lan78xx_read_raw_otp(dev,
1778 temp[1] * 2,
1779 24,
1780 (u8 *)regs);
1781 if (ret < 0)
1782 return;
1783 }
1784 }
1785 }
1786
1787 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1788 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1789 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1790 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1791 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1792 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1793}
1794
/* Full chip bring-up: lite reset, MAC address, USB/LTM/FIFO/flow
 * configuration, PHY reset and RX/TX enable. Called from lan78xx_open().
 * Returns 0 on success, -EIO when either polling loop times out.
 * NOTE(review): most individual register read/write results are computed
 * into 'ret' but ignored; only the two polling loops report failure.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a lite reset and poll (up to 1s) for it to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* choose bulk-in burst size and queue depths per USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		/* NOTE(review): tx_qlen is not set on the full-speed
		 * path — confirm the prior value is intended
		 */
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable MEF (multiple frames per bulk transfer) per HW_CFG bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts; disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait (up to 1s) for reset to clear AND the chip to be ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* GMII mode with automatic speed/duplex tracking */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	buf |= MAC_CR_GMII_EN_;
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the transmit path: MAC, then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the receive path: MAC, then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1929
/* ndo_open: reset the chip, attach the PHY and start the interrupt URB
 * used for link events. Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* link bring-up is handled from the deferred kevent worker */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
1970
1971static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
1972{
1973 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1974 DECLARE_WAITQUEUE(wait, current);
1975 int temp;
1976
1977 /* ensure there are no more active urbs */
1978 add_wait_queue(&unlink_wakeup, &wait);
1979 set_current_state(TASK_UNINTERRUPTIBLE);
1980 dev->wait = &unlink_wakeup;
1981 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
1982
1983 /* maybe wait for deletions to finish. */
1984 while (!skb_queue_empty(&dev->rxq) &&
1985 !skb_queue_empty(&dev->txq) &&
1986 !skb_queue_empty(&dev->done)) {
1987 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
1988 set_current_state(TASK_UNINTERRUPTIBLE);
1989 netif_dbg(dev, ifdown, dev->net,
1990 "waited for %d urb completions\n", temp);
1991 }
1992 set_current_state(TASK_RUNNING);
1993 dev->wait = NULL;
1994 remove_wait_queue(&unlink_wakeup, &wait);
1995}
1996
/* ndo_stop: detach the PHY, stop queues and URBs, and quiesce all
 * deferred work. Always returns 0.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* the PHY was connected in lan78xx_phy_init(); tear it down first */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* unlink and drain all in-flight bulk URBs */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2031
/* Flatten a possibly paged/fragmented skb into one linear buffer.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc = skb_linearize(skb);

	return rc;
}
2036
2037static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2038 struct sk_buff *skb, gfp_t flags)
2039{
2040 u32 tx_cmd_a, tx_cmd_b;
2041
2042 if (skb_headroom(skb) < TX_OVERHEAD) {
2043 struct sk_buff *skb2;
2044
2045 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2046 dev_kfree_skb_any(skb);
2047 skb = skb2;
2048 if (!skb)
2049 return NULL;
2050 }
2051
2052 if (lan78xx_linearize(skb) < 0)
2053 return NULL;
2054
2055 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2056
2057 if (skb->ip_summed == CHECKSUM_PARTIAL)
2058 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2059
2060 tx_cmd_b = 0;
2061 if (skb_is_gso(skb)) {
2062 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2063
2064 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2065
2066 tx_cmd_a |= TX_CMD_A_LSO_;
2067 }
2068
2069 if (skb_vlan_tag_present(skb)) {
2070 tx_cmd_a |= TX_CMD_A_IVTG_;
2071 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2072 }
2073
2074 skb_push(skb, 4);
2075 cpu_to_le32s(&tx_cmd_b);
2076 memcpy(skb->data, &tx_cmd_b, 4);
2077
2078 skb_push(skb, 4);
2079 cpu_to_le32s(&tx_cmd_a);
2080 memcpy(skb->data, &tx_cmd_a, 4);
2081
2082 return skb;
2083}
2084
/* Move @skb from @list to the done queue, recording @state in its cb,
 * and kick the bottom-half tasklet when the done queue becomes
 * non-empty. Returns the skb's previous state. Irq-safe.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);

	/* hand-over-hand locking: interrupts stay disabled while the
	 * source-list lock is swapped for the done-queue lock
	 */
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* schedule the bh only on the empty->non-empty transition */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2107
2108static void tx_complete(struct urb *urb)
2109{
2110 struct sk_buff *skb = (struct sk_buff *)urb->context;
2111 struct skb_data *entry = (struct skb_data *)skb->cb;
2112 struct lan78xx_net *dev = entry->dev;
2113
2114 if (urb->status == 0) {
2115 dev->net->stats.tx_packets++;
2116 dev->net->stats.tx_bytes += entry->length;
2117 } else {
2118 dev->net->stats.tx_errors++;
2119
2120 switch (urb->status) {
2121 case -EPIPE:
2122 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2123 break;
2124
2125 /* software-driven interface shutdown */
2126 case -ECONNRESET:
2127 case -ESHUTDOWN:
2128 break;
2129
2130 case -EPROTO:
2131 case -ETIME:
2132 case -EILSEQ:
2133 netif_stop_queue(dev->net);
2134 break;
2135 default:
2136 netif_dbg(dev, tx_err, dev->net,
2137 "tx err %d\n", entry->urb->status);
2138 break;
2139 }
2140 }
2141
2142 usb_autopm_put_interface_async(dev->intf);
2143
Woojung.Huh@microchip.com81c38e82015-08-11 15:21:41 +00002144 defer_bh(dev, skb, &dev->txq, tx_done);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002145}
2146
2147static void lan78xx_queue_skb(struct sk_buff_head *list,
2148 struct sk_buff *newsk, enum skb_state state)
2149{
2150 struct skb_data *entry = (struct skb_data *)newsk->cb;
2151
2152 __skb_queue_tail(list, newsk);
2153 entry->state = state;
2154}
2155
2156netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2157{
2158 struct lan78xx_net *dev = netdev_priv(net);
Woojung.Huh@microchip.com81c38e82015-08-11 15:21:41 +00002159 struct sk_buff *skb2 = NULL;
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002160
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002161 if (skb) {
Woojung.Huh@microchip.com81c38e82015-08-11 15:21:41 +00002162 skb_tx_timestamp(skb);
2163 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2164 }
2165
2166 if (skb2) {
2167 skb_queue_tail(&dev->txq_pend, skb2);
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002168
2169 if (skb_queue_len(&dev->txq_pend) > 10)
2170 netif_stop_queue(net);
2171 } else {
2172 netif_dbg(dev, tx_err, dev->net,
2173 "lan78xx_tx_prep return NULL\n");
2174 dev->net->stats.tx_errors++;
2175 dev->net->stats.tx_dropped++;
2176 }
2177
2178 tasklet_schedule(&dev->bh);
2179
2180 return NETDEV_TX_OK;
2181}
2182
/* Scan @intf's altsettings for the chip's bulk-in, bulk-out and (optional)
 * interrupt-in endpoints, stopping at the first altsetting that provides
 * both bulk directions.  Fills dev->pipe_in / dev->pipe_out and stashes
 * the interrupt endpoint (may remain NULL) in dev->ep_intr.
 *
 * Returns 0 on success, -EINVAL if no usable altsetting exists.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		/* start each altsetting with a clean slate */
		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN is of interest */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* skip control/isochronous endpoints */
				continue;
			}
			/* remember only the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2240
2241static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2242{
2243 struct lan78xx_priv *pdata = NULL;
2244 int ret;
2245 int i;
2246
2247 ret = lan78xx_get_endpoints(dev, intf);
2248
2249 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2250
2251 pdata = (struct lan78xx_priv *)(dev->data[0]);
2252 if (!pdata) {
2253 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2254 return -ENOMEM;
2255 }
2256
2257 pdata->dev = dev;
2258
2259 spin_lock_init(&pdata->rfe_ctl_lock);
2260 mutex_init(&pdata->dataport_mutex);
2261
2262 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2263
2264 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2265 pdata->vlan_table[i] = 0;
2266
2267 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2268
2269 dev->net->features = 0;
2270
2271 if (DEFAULT_TX_CSUM_ENABLE)
2272 dev->net->features |= NETIF_F_HW_CSUM;
2273
2274 if (DEFAULT_RX_CSUM_ENABLE)
2275 dev->net->features |= NETIF_F_RXCSUM;
2276
2277 if (DEFAULT_TSO_CSUM_ENABLE)
2278 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2279
2280 dev->net->hw_features = dev->net->features;
2281
2282 /* Init all registers */
2283 ret = lan78xx_reset(dev);
2284
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00002285 lan78xx_mdio_init(dev);
2286
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002287 dev->net->flags |= IFF_MULTICAST;
2288
2289 pdata->wol = WAKE_MAGIC;
2290
2291 return 0;
2292}
2293
2294static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2295{
2296 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2297
Woojung.Huh@microchip.comce85e132015-09-16 23:40:54 +00002298 lan78xx_remove_mdio(dev);
2299
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002300 if (pdata) {
2301 netif_dbg(dev, ifdown, dev->net, "free pdata");
2302 kfree(pdata);
2303 pdata = NULL;
2304 dev->data[0] = 0;
2305 }
2306}
2307
2308static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2309 struct sk_buff *skb,
2310 u32 rx_cmd_a, u32 rx_cmd_b)
2311{
2312 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2313 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2314 skb->ip_summed = CHECKSUM_NONE;
2315 } else {
2316 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2317 skb->ip_summed = CHECKSUM_COMPLETE;
2318 }
2319}
2320
2321void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2322{
2323 int status;
2324
2325 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2326 skb_queue_tail(&dev->rxq_pause, skb);
2327 return;
2328 }
2329
2330 skb->protocol = eth_type_trans(skb, dev->net);
2331 dev->net->stats.rx_packets++;
2332 dev->net->stats.rx_bytes += skb->len;
2333
2334 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2335 skb->len + sizeof(struct ethhdr), skb->protocol);
2336 memset(skb->cb, 0, sizeof(struct skb_data));
2337
2338 if (skb_defer_rx_timestamp(skb))
2339 return;
2340
2341 status = netif_rx(skb);
2342 if (status != NET_RX_SUCCESS)
2343 netif_dbg(dev, rx_err, dev->net,
2344 "netif_rx status %d\n", status);
2345}
2346
2347static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2348{
2349 if (skb->len < dev->net->hard_header_len)
2350 return 0;
2351
2352 while (skb->len > 0) {
2353 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2354 u16 rx_cmd_c;
2355 struct sk_buff *skb2;
2356 unsigned char *packet;
2357
2358 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2359 le32_to_cpus(&rx_cmd_a);
2360 skb_pull(skb, sizeof(rx_cmd_a));
2361
2362 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2363 le32_to_cpus(&rx_cmd_b);
2364 skb_pull(skb, sizeof(rx_cmd_b));
2365
2366 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2367 le16_to_cpus(&rx_cmd_c);
2368 skb_pull(skb, sizeof(rx_cmd_c));
2369
2370 packet = skb->data;
2371
2372 /* get the packet length */
2373 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2374 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2375
2376 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2377 netif_dbg(dev, rx_err, dev->net,
2378 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2379 } else {
2380 /* last frame in this batch */
2381 if (skb->len == size) {
2382 lan78xx_rx_csum_offload(dev, skb,
2383 rx_cmd_a, rx_cmd_b);
2384
2385 skb_trim(skb, skb->len - 4); /* remove fcs */
2386 skb->truesize = size + sizeof(struct sk_buff);
2387
2388 return 1;
2389 }
2390
2391 skb2 = skb_clone(skb, GFP_ATOMIC);
2392 if (unlikely(!skb2)) {
2393 netdev_warn(dev->net, "Error allocating skb");
2394 return 0;
2395 }
2396
2397 skb2->len = size;
2398 skb2->data = packet;
2399 skb_set_tail_pointer(skb2, size);
2400
2401 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2402
2403 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2404 skb2->truesize = size + sizeof(struct sk_buff);
2405
2406 lan78xx_skb_return(dev, skb2);
2407 }
2408
2409 skb_pull(skb, size);
2410
2411 /* padding bytes before the next frame starts */
2412 if (skb->len)
2413 skb_pull(skb, align_count);
2414 }
2415
2416 if (unlikely(skb->len < 0)) {
2417 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
2418 return 0;
2419 }
2420
2421 return 1;
2422}
2423
2424static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2425{
2426 if (!lan78xx_rx(dev, skb)) {
2427 dev->net->stats.rx_errors++;
2428 goto done;
2429 }
2430
2431 if (skb->len) {
2432 lan78xx_skb_return(dev, skb);
2433 return;
2434 }
2435
2436 netif_dbg(dev, rx_err, dev->net, "drop\n");
2437 dev->net->stats.rx_errors++;
2438done:
2439 skb_queue_tail(&dev->done, skb);
2440}
2441
2442static void rx_complete(struct urb *urb);
2443
/* Allocate a receive skb, bind it to @urb and submit the URB on the
 * bulk-in pipe.  On success the skb is queued on dev->rxq in rx_start
 * state; on any failure both the skb and the URB are freed here and a
 * negative errno is returned (-ENOLINK tells lan78xx_rx_bh to stop
 * refilling).
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* driver bookkeeping rides in the skb's control block */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock guards both the submit decision and the queueing, so a
	 * halt/suspend cannot slip in between them
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: let the bh retry the refill */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2503
/* Bulk-in URB completion.  Decides the skb's next state and, when the
 * URB is still usable, resubmits it for the next receive.
 *
 * URB ownership is tracked via the local 'urb' pointer: paths that want
 * the bottom half to free the URB stash it back in entry->urb and NULL
 * the local, so the resubmit block below is skipped.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count it and recycle the skb */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* let the bh free the urb along with the skb */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* old state tells us whether an unlink raced with this completion */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* reuse this urb immediately for the next receive */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2573
/* Bottom-half transmit path: coalesce as many pending frames as fit into
 * MAX_SINGLE_PACKET_SIZE into one bulk-out URB (a GSO frame always goes
 * out alone) and submit it.  Backpressure and power management are
 * handled under txq.lock.
 *
 * NOTE(review): txq_pend is walked here without taking its lock; this
 * appears to rely on the tasklet being the only dequeuer — confirm.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* pass 1: decide how many queued frames fit in one transfer */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* a GSO frame is sent on its own, uncopied */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame starts 4-byte aligned within the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* pass 2: copy the chosen frames into a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* hold the device awake for the duration of the transfer; the
	 * reference is dropped in tx_complete
	 */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		/* NOTE(review): on success the skb may already have been
		 * completed and freed by tx_complete at this point, making
		 * this skb->protocol read a potential use-after-free —
		 * confirm against upstream fixes
		 */
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2697
2698static void lan78xx_rx_bh(struct lan78xx_net *dev)
2699{
2700 struct urb *urb;
2701 int i;
2702
2703 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2704 for (i = 0; i < 10; i++) {
2705 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2706 break;
2707 urb = usb_alloc_urb(0, GFP_ATOMIC);
2708 if (urb)
2709 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2710 return;
2711 }
2712
2713 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2714 tasklet_schedule(&dev->bh);
2715 }
2716 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2717 netif_wake_queue(dev->net);
2718}
2719
2720static void lan78xx_bh(unsigned long param)
2721{
2722 struct lan78xx_net *dev = (struct lan78xx_net *)param;
2723 struct sk_buff *skb;
2724 struct skb_data *entry;
2725
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002726 while ((skb = skb_dequeue(&dev->done))) {
2727 entry = (struct skb_data *)(skb->cb);
2728 switch (entry->state) {
2729 case rx_done:
2730 entry->state = rx_cleanup;
2731 rx_process(dev, skb);
2732 continue;
2733 case tx_done:
2734 usb_free_urb(entry->urb);
2735 dev_kfree_skb(skb);
2736 continue;
2737 case rx_cleanup:
2738 usb_free_urb(entry->urb);
2739 dev_kfree_skb(skb);
2740 continue;
2741 default:
2742 netdev_dbg(dev->net, "skb state %d\n", entry->state);
2743 return;
2744 }
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00002745 }
2746
2747 if (netif_device_present(dev->net) && netif_running(dev->net)) {
2748 if (!skb_queue_empty(&dev->txq_pend))
2749 lan78xx_tx_bh(dev);
2750
2751 if (!timer_pending(&dev->delay) &&
2752 !test_bit(EVENT_RX_HALT, &dev->flags))
2753 lan78xx_rx_bh(dev);
2754 }
2755}
2756
2757static void lan78xx_delayedwork(struct work_struct *work)
2758{
2759 int status;
2760 struct lan78xx_net *dev;
2761
2762 dev = container_of(work, struct lan78xx_net, wq.work);
2763
2764 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2765 unlink_urbs(dev, &dev->txq);
2766 status = usb_autopm_get_interface(dev->intf);
2767 if (status < 0)
2768 goto fail_pipe;
2769 status = usb_clear_halt(dev->udev, dev->pipe_out);
2770 usb_autopm_put_interface(dev->intf);
2771 if (status < 0 &&
2772 status != -EPIPE &&
2773 status != -ESHUTDOWN) {
2774 if (netif_msg_tx_err(dev))
2775fail_pipe:
2776 netdev_err(dev->net,
2777 "can't clear tx halt, status %d\n",
2778 status);
2779 } else {
2780 clear_bit(EVENT_TX_HALT, &dev->flags);
2781 if (status != -ESHUTDOWN)
2782 netif_wake_queue(dev->net);
2783 }
2784 }
2785 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2786 unlink_urbs(dev, &dev->rxq);
2787 status = usb_autopm_get_interface(dev->intf);
2788 if (status < 0)
2789 goto fail_halt;
2790 status = usb_clear_halt(dev->udev, dev->pipe_in);
2791 usb_autopm_put_interface(dev->intf);
2792 if (status < 0 &&
2793 status != -EPIPE &&
2794 status != -ESHUTDOWN) {
2795 if (netif_msg_rx_err(dev))
2796fail_halt:
2797 netdev_err(dev->net,
2798 "can't clear rx halt, status %d\n",
2799 status);
2800 } else {
2801 clear_bit(EVENT_RX_HALT, &dev->flags);
2802 tasklet_schedule(&dev->bh);
2803 }
2804 }
2805
2806 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2807 int ret = 0;
2808
2809 clear_bit(EVENT_LINK_RESET, &dev->flags);
2810 status = usb_autopm_get_interface(dev->intf);
2811 if (status < 0)
2812 goto skip_reset;
2813 if (lan78xx_link_reset(dev) < 0) {
2814 usb_autopm_put_interface(dev->intf);
2815skip_reset:
2816 netdev_info(dev->net, "link reset failed (%d)\n",
2817 ret);
2818 } else {
2819 usb_autopm_put_interface(dev->intf);
2820 }
2821 }
2822}
2823
2824static void intr_complete(struct urb *urb)
2825{
2826 struct lan78xx_net *dev = urb->context;
2827 int status = urb->status;
2828
2829 switch (status) {
2830 /* success */
2831 case 0:
2832 lan78xx_status(dev, urb);
2833 break;
2834
2835 /* software-driven interface shutdown */
2836 case -ENOENT: /* urb killed */
2837 case -ESHUTDOWN: /* hardware gone */
2838 netif_dbg(dev, ifdown, dev->net,
2839 "intr shutdown, code %d\n", status);
2840 return;
2841
2842 /* NOTE: not throttling like RX/TX, since this endpoint
2843 * already polls infrequently
2844 */
2845 default:
2846 netdev_dbg(dev->net, "intr status %d\n", status);
2847 break;
2848 }
2849
2850 if (!netif_running(dev->net))
2851 return;
2852
2853 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2854 status = usb_submit_urb(urb, GFP_ATOMIC);
2855 if (status != 0)
2856 netif_err(dev, timer, dev->net,
2857 "intr resubmit --> %d\n", status);
2858}
2859
/* USB disconnect handler: tear down in roughly the reverse order of
 * probe — unregister the netdev first so no new traffic starts, stop
 * deferred work, drop URBs parked for resume, release driver-private
 * state, then kill/free the interrupt URB and drop the references.
 *
 * NOTE(review): the interrupt URB's transfer buffer allocated in probe
 * does not appear to be freed here — possible leak; confirm.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
2888
/* ndo_tx_timeout: the watchdog fired — cancel in-flight tx URBs and let
 * the bottom half restart transmission from txq_pend.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2896
/* net_device callbacks for the LAN78xx network interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open = lan78xx_open,
	.ndo_stop = lan78xx_stop,
	.ndo_start_xmit = lan78xx_start_xmit,
	.ndo_tx_timeout = lan78xx_tx_timeout,
	.ndo_change_mtu = lan78xx_change_mtu,
	.ndo_set_mac_address = lan78xx_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = lan78xx_ioctl,
	.ndo_set_rx_mode = lan78xx_set_multicast,
	.ndo_set_features = lan78xx_set_features,
	.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
2911
2912static int lan78xx_probe(struct usb_interface *intf,
2913 const struct usb_device_id *id)
2914{
2915 struct lan78xx_net *dev;
2916 struct net_device *netdev;
2917 struct usb_device *udev;
2918 int ret;
2919 unsigned maxp;
2920 unsigned period;
2921 u8 *buf = NULL;
2922
2923 udev = interface_to_usbdev(intf);
2924 udev = usb_get_dev(udev);
2925
2926 ret = -ENOMEM;
2927 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2928 if (!netdev) {
2929 dev_err(&intf->dev, "Error: OOM\n");
2930 goto out1;
2931 }
2932
2933 /* netdev_printk() needs this */
2934 SET_NETDEV_DEV(netdev, &intf->dev);
2935
2936 dev = netdev_priv(netdev);
2937 dev->udev = udev;
2938 dev->intf = intf;
2939 dev->net = netdev;
2940 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2941 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2942
2943 skb_queue_head_init(&dev->rxq);
2944 skb_queue_head_init(&dev->txq);
2945 skb_queue_head_init(&dev->done);
2946 skb_queue_head_init(&dev->rxq_pause);
2947 skb_queue_head_init(&dev->txq_pend);
2948 mutex_init(&dev->phy_mutex);
2949
2950 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2951 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2952 init_usb_anchor(&dev->deferred);
2953
2954 netdev->netdev_ops = &lan78xx_netdev_ops;
2955 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2956 netdev->ethtool_ops = &lan78xx_ethtool_ops;
2957
2958 ret = lan78xx_bind(dev, intf);
2959 if (ret < 0)
2960 goto out2;
2961 strcpy(netdev->name, "eth%d");
2962
2963 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2964 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2965
2966 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
2967 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
2968 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
2969
2970 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2971 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2972
2973 dev->pipe_intr = usb_rcvintpipe(dev->udev,
2974 dev->ep_intr->desc.bEndpointAddress &
2975 USB_ENDPOINT_NUMBER_MASK);
2976 period = dev->ep_intr->desc.bInterval;
2977
2978 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2979 buf = kmalloc(maxp, GFP_KERNEL);
2980 if (buf) {
2981 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2982 if (!dev->urb_intr) {
2983 kfree(buf);
2984 goto out3;
2985 } else {
2986 usb_fill_int_urb(dev->urb_intr, dev->udev,
2987 dev->pipe_intr, buf, maxp,
2988 intr_complete, dev, period);
2989 }
2990 }
2991
2992 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
2993
2994 /* driver requires remote-wakeup capability during autosuspend. */
2995 intf->needs_remote_wakeup = 1;
2996
2997 ret = register_netdev(netdev);
2998 if (ret != 0) {
2999 netif_err(dev, probe, netdev, "couldn't register the device\n");
3000 goto out2;
3001 }
3002
3003 usb_set_intfdata(intf, dev);
3004
3005 ret = device_set_wakeup_enable(&udev->dev, true);
3006
3007 /* Default delay of 2sec has more overhead than advantage.
3008 * Set to 10sec as default.
3009 */
3010 pm_runtime_set_autosuspend_delay(&udev->dev,
3011 DEFAULT_AUTOSUSPEND_DELAY);
3012
3013 return 0;
3014
Woojung.Huh@microchip.com55d7de92015-07-30 19:45:21 +00003015out3:
3016 lan78xx_unbind(dev, intf);
3017out2:
3018 free_netdev(netdev);
3019out1:
3020 usb_put_dev(udev);
3021
3022 return ret;
3023}
3024
3025static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3026{
3027 const u16 crc16poly = 0x8005;
3028 int i;
3029 u16 bit, crc, msb;
3030 u8 data;
3031
3032 crc = 0xFFFF;
3033 for (i = 0; i < len; i++) {
3034 data = *buf++;
3035 for (bit = 0; bit < 8; bit++) {
3036 msb = crc >> 15;
3037 crc <<= 1;
3038
3039 if (msb ^ (u16)(data & 1)) {
3040 crc ^= crc16poly;
3041 crc |= (u16)0x0001U;
3042 }
3043 data >>= 1;
3044 }
3045 }
3046
3047 return crc;
3048}
3049
/* Program the chip's wake-on-LAN machinery for system suspend according
 * to the @wol bitmask (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP), then pick
 * the matching suspend mode in PMT_CTL and re-enable the receiver so
 * wake frames can be seen.
 *
 * NOTE(review): every register access result is assigned to 'ret' but
 * never checked; the function unconditionally returns 0 — confirm this
 * best-effort behavior is intended.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* first bytes of IPv4/IPv6 multicast MACs and the ARP ethertype,
	 * used to build wakeup-frame CRC filters below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear any stale wake configuration and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before selectively enabling */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		/* magic-packet wake uses the deepest suspend mode */
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: CRC covers the first three header bytes */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: CRC covers the two 0x33 prefix bytes */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: CRC covers bytes 12-13 (the ethertype) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* receiver must run so wake frames can be detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3192
/* USB suspend handler for both autosuspend and system sleep.
 *
 * On the first suspend (suspend_count 0 -> 1): refuse autosuspend while
 * tx work is pending, otherwise mark the device asleep, stop the MAC,
 * and tear down in-flight URBs.  Then, depending on the suspend type,
 * arm either "wake on any good frame" (autosuspend) or the user's
 * configured WoL set (system sleep).
 *
 * NOTE(review): as in lan78xx_set_suspend, individual register-access
 * return codes are assigned to 'ret' but not checked.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	ret = 0;
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* set under txq.lock so lan78xx_tx_bh defers new
			 * submissions to resume from here on
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* reset wake logic and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear any latched wake-up status bits */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* receiver stays on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: honor the configured WoL modes */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

out:
	return ret;
}
3286
/* lan78xx_resume - USB resume callback (system wake and runtime PM)
 *
 * When the last outstanding suspend is undone (suspend_count reaches 0):
 * resubmit the interrupt URB (if the interface is open), flush every TX
 * URB that was parked on the deferred anchor while asleep, clear
 * EVENT_DEV_ASLEEP, and restart the TX queue.  Finally the wake-up
 * control/status registers are cleared and the MAC transmitter is
 * re-enabled.
 *
 * NOTE(review): this always returns 0; @ret from the register writes and
 * from usb_submit_urb(dev->urb_intr, ...) is never checked, so failures
 * here are silently ignored.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* drain TX URBs deferred while asleep; txq.lock protects
		 * both the anchor walk and the queue bookkeeping
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* submit failed: drop the packet and release
				 * the PM reference taken at queue time
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* restart the queue only if there is room in txq */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disable wake sources and clear latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the "wake event received" status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the MAC transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3347
/* lan78xx_reset_resume - resume after the device was reset or lost power
 *
 * Unlike a plain resume, register state cannot be trusted here, so the
 * chip is fully re-initialized (lan78xx_reset) and the PHY is brought
 * back up (lan78xx_phy_init) before running the normal resume path.
 *
 * Returns the result of lan78xx_resume().
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3358
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
		/* LAN7800 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
		/* LAN7850 USB Gigabit Ethernet Device */
		USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
3371
/* USB driver glue: probe/disconnect and power-management entry points.
 * Autosuspend is supported; hub-initiated LPM is disabled for this device.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");