/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/*	NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*	NETIF_MSG_TX_QUEUED | */
/*	NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/*	NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
126
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

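/* Usage sketch for the semaphore pair above (illustrative only; the
 * flash routines later in this file follow exactly this pattern):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... touch the registers guarded by this semaphore ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */
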
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto unmap;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
unmap:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

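/* Hedged caller sketch: the ring-setup code later in this driver
 * downloads a completion-queue init control block in essentially this
 * shape (cqicb here stands in for the ring's control block pointer):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */
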
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set MAC addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init MAC address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
				 qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
				 qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

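/* Hedged usage sketch: the filter-setup paths enable routing slots with
 * calls of this shape, e.g. to send all broadcast frames to the default
 * queue:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 */
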
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

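/* Hedged usage sketch: statistics gathering reads each 64-bit MAC
 * counter as a lo/hi dword pair through this helper, along the lines
 * of (the register offset is illustrative, not a documented value):
 *
 *	u64 frames;
 *	if (ql_read_xgmac_reg64(qdev, 0x200, &frames) == 0)
 *		... frames now holds the full 64-bit counter ...
 */
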
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

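/* Worked example: with 4KB pages and lbq_buf_order == 1 this yields an
 * 8KB master block, which ql_get_next_chunk() below then carves into
 * lbq_buf_size pieces (e.g. four 2KB large buffers per block).
 */
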
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    pci_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			QPRINTK(qdev, DRV, ERR,
				"page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			QPRINTK(qdev, DRV, ERR,
				"PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				QPRINTK(qdev, IFUP, ERR,
					"Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			pci_unmap_addr_set(lbq_desc, mapaddr, map);
			pci_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

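/* Hedged caller sketch: the transmit path hands each outbound skb to
 * ql_map_send() and backs off on failure, roughly:
 *
 *	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
 *	    NETDEV_TX_OK) {
 *		... leave the skb queued and report NETDEV_TX_BUSY ...
 *	}
 */
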
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
			"need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
			ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (length > ndev->mtu + ETH_HLEN) {
		QPRINTK(qdev, DRV, ERR, "Segment too large, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	QPRINTK(qdev, RX_STATUS, DEBUG,
		"%d bytes of headers and data in large. Chain "
		"page to new skb and pull tail.\n", length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset+ETH_HLEN,
			   length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		QPRINTK(qdev, PROBE, ERR,
			"No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
			ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

Stephen Hemminger8668ae92008-11-21 17:29:50 -08001682static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001683{
1684 void *temp_addr = skb->data;
1685
1686 /* Undo the skb_reserve(skb,32) we did before
1687 * giving to hardware, and realign data on
1688 * a 2-byte boundary.
1689 */
1690 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1691 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1692 skb_copy_to_linear_data(skb, temp_addr,
1693 (unsigned int)len);
1694}
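/* Informal sketch of the arithmetic in ql_realign_skb(), assuming the
 * usual NET_IP_ALIGN of 2 and the QLGE_SB_PAD of 32 reserved at fill
 * time:
 *
 *	before: skb->data = base + 32	(hardware DMA headroom)
 *	after:  skb->data = base + 2	(IP header lands 4-byte aligned)
 *
 * The payload is then copied down to the new, lower data pointer.
 */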
1695
1696/*
1697 * This function builds an skb for the given inbound
1698 * completion. It will be rewritten for readability in the near
1699 * future, but for now it works well.
1700 */
1701static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1702 struct rx_ring *rx_ring,
1703 struct ib_mac_iocb_rsp *ib_mac_rsp)
1704{
1705 struct bq_desc *lbq_desc;
1706 struct bq_desc *sbq_desc;
1707 struct sk_buff *skb = NULL;
1708 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1709 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1710
1711 /*
1712 * Handle the header buffer if present.
1713 */
1714 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1715 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1716 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1717 /*
1718 * Headers fit nicely into a small buffer.
1719 */
1720 sbq_desc = ql_get_curr_sbuf(rx_ring);
1721 pci_unmap_single(qdev->pdev,
1722 pci_unmap_addr(sbq_desc, mapaddr),
1723 pci_unmap_len(sbq_desc, maplen),
1724 PCI_DMA_FROMDEVICE);
1725 skb = sbq_desc->p.skb;
1726 ql_realign_skb(skb, hdr_len);
1727 skb_put(skb, hdr_len);
1728 sbq_desc->p.skb = NULL;
1729 }
1730
1731 /*
1732 * Handle the data buffer(s).
1733 */
1734 if (unlikely(!length)) { /* Is there data too? */
1735 QPRINTK(qdev, RX_STATUS, DEBUG,
1736 "No Data buffer in this packet.\n");
1737 return skb;
1738 }
1739
1740 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1741 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1742 QPRINTK(qdev, RX_STATUS, DEBUG,
1743 "Headers in small, data of %d bytes in small, combine them.\n", length);
1744 /*
1745 * Data is less than small buffer size so it's
1746 * stuffed in a small buffer.
1747 * For this case we append the data
1748 * from the "data" small buffer to the "header" small
1749 * buffer.
1750 */
1751 sbq_desc = ql_get_curr_sbuf(rx_ring);
1752 pci_dma_sync_single_for_cpu(qdev->pdev,
1753 pci_unmap_addr
1754 (sbq_desc, mapaddr),
1755 pci_unmap_len
1756 (sbq_desc, maplen),
1757 PCI_DMA_FROMDEVICE);
1758 memcpy(skb_put(skb, length),
1759 sbq_desc->p.skb->data, length);
1760 pci_dma_sync_single_for_device(qdev->pdev,
1761 pci_unmap_addr
1762 (sbq_desc,
1763 mapaddr),
1764 pci_unmap_len
1765 (sbq_desc,
1766 maplen),
1767 PCI_DMA_FROMDEVICE);
1768 } else {
1769 QPRINTK(qdev, RX_STATUS, DEBUG,
1770 "%d bytes in a single small buffer.\n", length);
1771 sbq_desc = ql_get_curr_sbuf(rx_ring);
1772 skb = sbq_desc->p.skb;
1773 ql_realign_skb(skb, length);
1774 skb_put(skb, length);
1775 pci_unmap_single(qdev->pdev,
1776 pci_unmap_addr(sbq_desc,
1777 mapaddr),
1778 pci_unmap_len(sbq_desc,
1779 maplen),
1780 PCI_DMA_FROMDEVICE);
1781 sbq_desc->p.skb = NULL;
1782 }
1783 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1784 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1785 QPRINTK(qdev, RX_STATUS, DEBUG,
1786 "Header in small, %d bytes in large. Chain large to small!\n", length);
1787 /*
1788 * The data is in a single large buffer. We
1789 * chain it to the header buffer's skb and let
1790 * it rip.
1791 */
Ron Mercer7c734352009-10-19 03:32:19 +00001792 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001793 QPRINTK(qdev, RX_STATUS, DEBUG,
Ron Mercer7c734352009-10-19 03:32:19 +00001794 "Chaining page at offset = %d,"
1795 "for %d bytes to skb.\n",
1796 lbq_desc->p.pg_chunk.offset, length);
1797 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1798 lbq_desc->p.pg_chunk.offset,
1799 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001800 skb->len += length;
1801 skb->data_len += length;
1802 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001803 } else {
1804 /*
1805 * The headers and data are in a single large buffer. We
1806 * copy it to a new skb and let it go. This can happen with
1807 * jumbo mtu on a non-TCP/UDP frame.
1808 */
Ron Mercer7c734352009-10-19 03:32:19 +00001809 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001810 skb = netdev_alloc_skb(qdev->ndev, length);
1811 if (skb == NULL) {
1812 QPRINTK(qdev, PROBE, DEBUG,
1813 "No skb available, drop the packet.\n");
1814 return NULL;
1815 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001816 pci_unmap_page(qdev->pdev,
1817 pci_unmap_addr(lbq_desc,
1818 mapaddr),
1819 pci_unmap_len(lbq_desc, maplen),
1820 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001821 skb_reserve(skb, NET_IP_ALIGN);
1822 QPRINTK(qdev, RX_STATUS, DEBUG,
1823 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
Ron Mercer7c734352009-10-19 03:32:19 +00001824 skb_fill_page_desc(skb, 0,
1825 lbq_desc->p.pg_chunk.page,
1826 lbq_desc->p.pg_chunk.offset,
1827 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001828 skb->len += length;
1829 skb->data_len += length;
1830 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001832 __pskb_pull_tail(skb,
1833 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1834 VLAN_ETH_HLEN : ETH_HLEN);
1835 }
1836 } else {
1837 /*
1838 * The data is in a chain of large buffers
1839 * pointed to by a small buffer. We loop
1840 * through and chain them to our small header
1841 * buffer's skb.
1842 * frags: There are 18 max frags and our small
1843 * buffer will hold 32 of them. The thing is,
1844 * we'll use 3 max for our 9000 byte jumbo
1845 * frames. If the MTU goes up we could
1846 * eventually be in trouble.
1847 */
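 /* Worked example, assuming 4096-byte large buffer chunks: a
 * 9000-byte jumbo frame needs DIV_ROUND_UP(9000, 4096) = 3
 * chunks, well under the 18-frag ceiling; the ceiling only
 * matters if the MTU ever approached 18 * 4096 bytes.
 */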
Ron Mercer7c734352009-10-19 03:32:19 +00001848 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001849 sbq_desc = ql_get_curr_sbuf(rx_ring);
1850 pci_unmap_single(qdev->pdev,
1851 pci_unmap_addr(sbq_desc, mapaddr),
1852 pci_unmap_len(sbq_desc, maplen),
1853 PCI_DMA_FROMDEVICE);
1854 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1855 /*
1856 * This is an non TCP/UDP IP frame, so
1857 * the headers aren't split into a small
1858 * buffer. We have to use the small buffer
1859 * that contains our sg list as our skb to
1860 * send upstairs. The large buffer chunks are
1861 * then walked directly to find the pages to
1862 * chain.
1863 */
1864 QPRINTK(qdev, RX_STATUS, DEBUG,
1865 "%d bytes of headers & data in chain of large.\n", length);
1866 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001867 sbq_desc->p.skb = NULL;
1868 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001869 }
1870 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001871 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1872 size = (length < rx_ring->lbq_buf_size) ? length :
1873 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001874
1875 QPRINTK(qdev, RX_STATUS, DEBUG,
1876 "Adding page %d to skb for %d bytes.\n",
1877 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001878 skb_fill_page_desc(skb, i,
1879 lbq_desc->p.pg_chunk.page,
1880 lbq_desc->p.pg_chunk.offset,
1881 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001882 skb->len += size;
1883 skb->data_len += size;
1884 skb->truesize += size;
1885 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001886 i++;
1887 }
1888 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1889 VLAN_ETH_HLEN : ETH_HLEN);
1890 }
1891 return skb;
1892}
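/* Informal summary of the flag combinations ql_build_rx_skb() handles
 * above (HS = header split out, DS = data in small buffer, DL = data
 * in large buffer):
 *
 *	HS + DS:   data appended to the header skb from a small buffer
 *	HS + DL:   one large buffer page chained onto the header skb
 *	DS alone:  whole frame in a single small buffer
 *	DL alone:  frame in one large page, chained to a fresh skb
 *	neither:   data spans a chain of large buffers (the IAL case)
 */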
1893
1894/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001895static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001897 struct ib_mac_iocb_rsp *ib_mac_rsp,
1898 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001899{
1900 struct net_device *ndev = qdev->ndev;
1901 struct sk_buff *skb = NULL;
1902
1903 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1904
1905 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1906 if (unlikely(!skb)) {
1907 QPRINTK(qdev, RX_STATUS, DEBUG,
1908 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001909 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001910 return;
1911 }
1912
Ron Mercera32959c2009-06-09 05:39:27 +00001913 /* Frame error, so drop the packet. */
1914 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1915 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1916 ib_mac_rsp->flags2);
1917 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001918 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001919 return;
1920 }
Ron Mercerec33a492009-06-09 05:39:28 +00001921
1922 /* The max framesize filter on this chip is set higher than
1923 * MTU since FCoE uses 2k frames.
1924 */
1925 if (skb->len > ndev->mtu + ETH_HLEN) {
1926 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001927 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001928 return;
1929 }
1930
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001931 /* loopback self test for ethtool */
1932 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1933 ql_check_lb_frame(qdev, skb);
1934 dev_kfree_skb_any(skb);
1935 return;
1936 }
1937
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001938 prefetch(skb->data);
1939 skb->dev = ndev;
1940 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1941 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1942 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1943 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1944 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1945 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1946 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1947 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001948 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001949 }
1950 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1951 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1952 }
Ron Mercerd555f592009-03-09 10:59:19 +00001953
Ron Mercerd555f592009-03-09 10:59:19 +00001954 skb->protocol = eth_type_trans(skb, ndev);
1955 skb->ip_summed = CHECKSUM_NONE;
1956
1957 /* If rx checksum is on, and there are no
1958 * csum or frame errors.
1959 */
1960 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001961 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1962 /* TCP frame. */
1963 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1964 QPRINTK(qdev, RX_STATUS, DEBUG,
1965 "TCP checksum done!\n");
1966 skb->ip_summed = CHECKSUM_UNNECESSARY;
1967 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1968 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1969 /* Unfragmented ipv4 UDP frame. */
1970 struct iphdr *iph = (struct iphdr *) skb->data;
1971 if (!(iph->frag_off &
1972 cpu_to_be16(IP_MF|IP_OFFSET))) {
1973 skb->ip_summed = CHECKSUM_UNNECESSARY;
1974 QPRINTK(qdev, RX_STATUS, DEBUG,
1975 "TCP checksum done!\n");
1976 }
1977 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001978 }
Ron Mercerd555f592009-03-09 10:59:19 +00001979
Ron Mercer885ee392009-11-03 13:49:31 +00001980 rx_ring->rx_packets++;
1981 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001982 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001983 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1984 if (qdev->vlgrp &&
1985 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1986 (vlan_id != 0))
1987 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1988 vlan_id, skb);
1989 else
1990 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001991 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001992 if (qdev->vlgrp &&
1993 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1994 (vlan_id != 0))
1995 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1996 else
1997 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001998 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001999}
2000
Ron Mercer4f848c02010-01-02 10:37:43 +00002001/* Process an inbound completion from an rx ring. */
2002static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2003 struct rx_ring *rx_ring,
2004 struct ib_mac_iocb_rsp *ib_mac_rsp)
2005{
2006 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2007 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2008 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2009 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2010
2011 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2012
2013 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2014 /* The data and headers are split into
2015 * separate buffers.
2016 */
2017 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2018 vlan_id);
2019 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2020 /* The data fit in a single small buffer.
2021 * Allocate a new skb, copy the data and
2022 * return the buffer to the free pool.
2023 */
2024 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2025 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002026 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2027 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2028 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2029 /* TCP packet in a page chunk that's been checksummed.
2030 * Tack it on to our GRO skb and let it go.
2031 */
2032 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2033 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002034 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2035 /* Non-TCP packet in a page chunk. Allocate an
2036 * skb, tack it on frags, and send it up.
2037 */
2038 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2039 length, vlan_id);
2040 } else {
2041 struct bq_desc *lbq_desc;
2042
2043 /* Free small buffer that holds the IAL */
2044 lbq_desc = ql_get_curr_sbuf(rx_ring);
2045 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2046 length, qdev->ndev->mtu);
2047
2048 /* Unwind the large buffers for this frame. */
2049 while (length > 0) {
2050 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2051 length -= (length < rx_ring->lbq_buf_size) ?
2052 length : rx_ring->lbq_buf_size;
2053 put_page(lbq_desc->p.pg_chunk.page);
2054 }
2055 }
2056
2057 return (unsigned long)length;
2058}
2059
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002060/* Process an outbound completion from an rx ring. */
2061static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2062 struct ob_mac_iocb_rsp *mac_rsp)
2063{
2064 struct tx_ring *tx_ring;
2065 struct tx_ring_desc *tx_ring_desc;
2066
2067 QL_DUMP_OB_MAC_RSP(mac_rsp);
2068 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2069 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2070 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002071 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2072 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002073 dev_kfree_skb(tx_ring_desc->skb);
2074 tx_ring_desc->skb = NULL;
2075
2076 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2077 OB_MAC_IOCB_RSP_S |
2078 OB_MAC_IOCB_RSP_L |
2079 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2080 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2081 QPRINTK(qdev, TX_DONE, WARNING,
2082 "Total descriptor length did not match transfer length.\n");
2083 }
2084 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2085 QPRINTK(qdev, TX_DONE, WARNING,
2086 "Frame too short to be legal, not sent.\n");
2087 }
2088 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2089 QPRINTK(qdev, TX_DONE, WARNING,
2090 "Frame too long, but sent anyway.\n");
2091 }
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2093 QPRINTK(qdev, TX_DONE, WARNING,
2094 "PCI backplane error. Frame not sent.\n");
2095 }
2096 }
2097 atomic_inc(&tx_ring->tx_count);
2098}
2099
2100/* Fire up a handler to reset the MPI processor. */
2101void ql_queue_fw_error(struct ql_adapter *qdev)
2102{
Ron Mercer6a473302009-07-02 06:06:12 +00002103 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002104 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2105}
2106
2107void ql_queue_asic_error(struct ql_adapter *qdev)
2108{
Ron Mercer6a473302009-07-02 06:06:12 +00002109 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002110 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002111 /* Clear adapter up bit to signal the recovery
2112 * process that it shouldn't kill the reset worker
2113 * thread
2114 */
2115 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002116 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2117}
2118
2119static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2120 struct ib_ae_iocb_rsp *ib_ae_rsp)
2121{
2122 switch (ib_ae_rsp->event) {
2123 case MGMT_ERR_EVENT:
2124 QPRINTK(qdev, RX_ERR, ERR,
2125 "Management Processor Fatal Error.\n");
2126 ql_queue_fw_error(qdev);
2127 return;
2128
2129 case CAM_LOOKUP_ERR_EVENT:
2130 QPRINTK(qdev, LINK, ERR,
2131 "Multiple CAM hits lookup occurred.\n");
2132 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
2133 ql_queue_asic_error(qdev);
2134 return;
2135
2136 case SOFT_ECC_ERROR_EVENT:
2137 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
2138 ql_queue_asic_error(qdev);
2139 break;
2140
2141 case PCI_ERR_ANON_BUF_RD:
2142 QPRINTK(qdev, RX_ERR, ERR,
2143 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2144 ib_ae_rsp->q_id);
2145 ql_queue_asic_error(qdev);
2146 break;
2147
2148 default:
2149 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
2150 ib_ae_rsp->event);
2151 ql_queue_asic_error(qdev);
2152 break;
2153 }
2154}
2155
2156static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2157{
2158 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002159 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002160 struct ob_mac_iocb_rsp *net_rsp = NULL;
2161 int count = 0;
2162
Ron Mercer1e213302009-03-09 10:59:21 +00002163 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002164 /* While there are entries in the completion queue. */
2165 while (prod != rx_ring->cnsmr_idx) {
2166
2167 QPRINTK(qdev, RX_STATUS, DEBUG,
2168 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2169 prod, rx_ring->cnsmr_idx);
2170
2171 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2172 rmb();
2173 switch (net_rsp->opcode) {
2174
2175 case OPCODE_OB_MAC_TSO_IOCB:
2176 case OPCODE_OB_MAC_IOCB:
2177 ql_process_mac_tx_intr(qdev, net_rsp);
2178 break;
2179 default:
2180 QPRINTK(qdev, RX_STATUS, DEBUG,
2181 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2182 net_rsp->opcode);
2183 }
2184 count++;
2185 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002186 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002187 }
2188 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002189 /* net_rsp is NULL when no completions were
 2190 * processed, so check it before using it to
 2191 * find the tx_ring to wake.
 2192 */
 2193 if (net_rsp == NULL)
 2194 return count;
 2195 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
 2196 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002192 if (atomic_read(&tx_ring->queue_stopped) &&
2193 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2194 /*
2195 * The queue got stopped because the tx_ring was full.
2196 * Wake it up, because it's now at least 25% empty.
2197 */
Ron Mercer1e213302009-03-09 10:59:21 +00002198 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199 }
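 /* Illustrative numbers for the wake test above: with a 128-entry
 * work queue, a subqueue stopped for lack of space is only rewoken
 * once more than 128 / 4 = 32 slots are free again, so the queue
 * does not flap between stopped and awake on every completion.
 */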
2200
2201 return count;
2202}
2203
2204static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2205{
2206 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002207 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002208 struct ql_net_rsp_iocb *net_rsp;
2209 int count = 0;
2210
2211 /* While there are entries in the completion queue. */
2212 while (prod != rx_ring->cnsmr_idx) {
2213
2214 QPRINTK(qdev, RX_STATUS, DEBUG,
2215 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2216 prod, rx_ring->cnsmr_idx);
2217
2218 net_rsp = rx_ring->curr_entry;
2219 rmb();
2220 switch (net_rsp->opcode) {
2221 case OPCODE_IB_MAC_IOCB:
2222 ql_process_mac_rx_intr(qdev, rx_ring,
2223 (struct ib_mac_iocb_rsp *)
2224 net_rsp);
2225 break;
2226
2227 case OPCODE_IB_AE_IOCB:
2228 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2229 net_rsp);
2230 break;
2231 default:
2232 {
2233 QPRINTK(qdev, RX_STATUS, DEBUG,
2234 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2235 net_rsp->opcode);
2236 }
2237 }
2238 count++;
2239 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002240 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002241 if (count == budget)
2242 break;
2243 }
2244 ql_update_buffer_queues(qdev, rx_ring);
2245 ql_write_cq_idx(rx_ring);
2246 return count;
2247}
2248
2249static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2250{
2251 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2252 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002253 struct rx_ring *trx_ring;
2254 int i, work_done = 0;
2255 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002256
2257 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
2258 rx_ring->cq_id);
2259
Ron Mercer39aa8162009-08-27 11:02:11 +00002260 /* Service the TX rings first. They start
2261 * right after the RSS rings. */
2262 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2263 trx_ring = &qdev->rx_ring[i];
2264 /* If this TX completion ring belongs to this vector and
2265 * it's not empty then service it.
2266 */
2267 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2268 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2269 trx_ring->cnsmr_idx)) {
2270 QPRINTK(qdev, INTR, DEBUG,
2271 "%s: Servicing TX completion ring %d.\n",
2272 __func__, trx_ring->cq_id);
2273 ql_clean_outbound_rx_ring(trx_ring);
2274 }
2275 }
2276
2277 /*
2278 * Now service the RSS ring if it's active.
2279 */
2280 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2281 rx_ring->cnsmr_idx) {
2282 QPRINTK(qdev, INTR, DEBUG,
2283 "%s: Servicing RX completion ring %d.\n",
2284 __func__, rx_ring->cq_id);
2285 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2286 }
2287
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002288 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002289 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002290 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2291 }
2292 return work_done;
2293}
2294
Ron Mercer01e6b952009-10-30 12:13:34 +00002295static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002296{
2297 struct ql_adapter *qdev = netdev_priv(ndev);
2298
2299 qdev->vlgrp = grp;
2300 if (grp) {
2301 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
2302 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2303 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2304 } else {
2305 QPRINTK(qdev, IFUP, DEBUG,
2306 "Turning off VLAN in NIC_RCV_CFG.\n");
2307 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2308 }
2309}
2310
Ron Mercer01e6b952009-10-30 12:13:34 +00002311static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002312{
2313 struct ql_adapter *qdev = netdev_priv(ndev);
2314 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002315 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002316
Ron Mercercc288f52009-02-23 10:42:14 +00002317 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2318 if (status)
2319 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002320 if (ql_set_mac_addr_reg
2321 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2322 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2323 }
Ron Mercercc288f52009-02-23 10:42:14 +00002324 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002325}
2326
Ron Mercer01e6b952009-10-30 12:13:34 +00002327static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328{
2329 struct ql_adapter *qdev = netdev_priv(ndev);
2330 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002331 int status;
2332
2333 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2334 if (status)
2335 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002336
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002337 if (ql_set_mac_addr_reg
2338 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2339 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2340 }
Ron Mercercc288f52009-02-23 10:42:14 +00002341 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002342
2343}
2344
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002345/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2346static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2347{
2348 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002349 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002350 return IRQ_HANDLED;
2351}
2352
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002353/* This handles a fatal error, MPI activity, and the default
2354 * rx_ring in an MSI-X multiple vector environment.
2355 * In an MSI/Legacy environment it also processes the rest of
2356 * the rx_rings.
2357 */
2358static irqreturn_t qlge_isr(int irq, void *dev_id)
2359{
2360 struct rx_ring *rx_ring = dev_id;
2361 struct ql_adapter *qdev = rx_ring->qdev;
2362 struct intr_context *intr_context = &qdev->intr_context[0];
2363 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002364 int work_done = 0;
2365
Ron Mercerbb0d2152008-10-20 10:30:26 -07002366 spin_lock(&qdev->hw_lock);
2367 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2368 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2369 spin_unlock(&qdev->hw_lock);
2370 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002371 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002372 spin_unlock(&qdev->hw_lock);
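 /* The irq_cnt test above: a nonzero count means this driver
 * currently has its completion interrupt masked, so a legacy
 * interrupt arriving now is taken to belong to another device
 * sharing the line.
 */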
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002373
Ron Mercerbb0d2152008-10-20 10:30:26 -07002374 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002375
2376 /*
2377 * Check for fatal error.
2378 */
2379 if (var & STS_FE) {
2380 ql_queue_asic_error(qdev);
2381 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2382 var = ql_read32(qdev, ERR_STS);
2383 QPRINTK(qdev, INTR, ERR,
2384 "Resetting chip. Error Status Register = 0x%x\n", var);
2385 return IRQ_HANDLED;
2386 }
2387
2388 /*
2389 * Check MPI processor activity.
2390 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002391 if ((var & STS_PI) &&
2392 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002393 /*
2394 * We've got an async event or mailbox completion.
2395 * Handle it and clear the source of the interrupt.
2396 */
2397 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2398 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002399 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2400 queue_delayed_work_on(smp_processor_id(),
2401 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002402 work_done++;
2403 }
2404
2405 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002406 * Get the bit-mask that shows the active queues for this
2407 * pass. Compare it to the queues that this irq services
2408 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002409 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002410 var = ql_read32(qdev, ISR1);
2411 if (var & intr_context->irq_mask) {
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002412 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002413 "Waking handler for rx_ring[0].\n");
2414 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002415 napi_schedule(&rx_ring->napi);
2416 work_done++;
2417 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002418 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002419 return work_done ? IRQ_HANDLED : IRQ_NONE;
2420}
2421
2422static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2423{
2425 if (skb_is_gso(skb)) {
2426 int err;
2427 if (skb_header_cloned(skb)) {
2428 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2429 if (err)
2430 return err;
2431 }
2432
2433 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2434 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2435 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2436 mac_iocb_ptr->total_hdrs_len =
2437 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2438 mac_iocb_ptr->net_trans_offset =
2439 cpu_to_le16(skb_network_offset(skb) |
2440 skb_transport_offset(skb)
2441 << OB_MAC_TRANSPORT_HDR_SHIFT);
2442 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2443 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2444 if (likely(skb->protocol == htons(ETH_P_IP))) {
2445 struct iphdr *iph = ip_hdr(skb);
2446 iph->check = 0;
2447 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2448 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2449 iph->daddr, 0,
2450 IPPROTO_TCP,
2451 0);
2452 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2453 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2454 tcp_hdr(skb)->check =
2455 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2456 &ipv6_hdr(skb)->daddr,
2457 0, IPPROTO_TCP, 0);
2458 }
2459 return 1;
2460 }
2461 return 0;
2462}
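/* A rough sketch of what ql_tso() above hands to the hardware for an
 * IPv4 TCP skb (the numbers are illustrative, not chip requirements):
 *
 *	mss            = gso_size, e.g. 1460
 *	total_hdrs_len = ETH + IP + TCP headers, e.g. 14 + 20 + 20
 *	tcp->check     = pseudo-header sum seeded with length 0, which
 *	                 the chip completes for each generated segment
 */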
2463
2464static void ql_hw_csum_setup(struct sk_buff *skb,
2465 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2466{
2467 int len;
2468 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002469 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002470 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2471 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2472 mac_iocb_ptr->net_trans_offset =
2473 cpu_to_le16(skb_network_offset(skb) |
2474 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2475
2476 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2477 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2478 if (likely(iph->protocol == IPPROTO_TCP)) {
2479 check = &(tcp_hdr(skb)->check);
2480 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2481 mac_iocb_ptr->total_hdrs_len =
2482 cpu_to_le16(skb_transport_offset(skb) +
2483 (tcp_hdr(skb)->doff << 2));
2484 } else {
2485 check = &(udp_hdr(skb)->check);
2486 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2487 mac_iocb_ptr->total_hdrs_len =
2488 cpu_to_le16(skb_transport_offset(skb) +
2489 sizeof(struct udphdr));
2490 }
2491 *check = ~csum_tcpudp_magic(iph->saddr,
2492 iph->daddr, len, iph->protocol, 0);
2493}
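/* Note the contrast with the TSO path: ql_hw_csum_setup() seeds the
 * pseudo-header sum with the real L4 length because exactly one frame
 * goes out, while ql_tso() seeds it with a zero length because the
 * hardware recomputes the sum per generated segment.
 */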
2494
Stephen Hemminger613573252009-08-31 19:50:58 +00002495static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002496{
2497 struct tx_ring_desc *tx_ring_desc;
2498 struct ob_mac_iocb_req *mac_iocb_ptr;
2499 struct ql_adapter *qdev = netdev_priv(ndev);
2500 int tso;
2501 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002502 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002503
2504 tx_ring = &qdev->tx_ring[tx_ring_idx];
2505
Ron Mercer74c50b42009-03-09 10:59:27 +00002506 if (skb_padto(skb, ETH_ZLEN))
2507 return NETDEV_TX_OK;
2508
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002509 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2510 QPRINTK(qdev, TX_QUEUED, INFO,
2511 "%s: shutting down tx queue %d du to lack of resources.\n",
2512 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002513 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002514 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002515 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002516 return NETDEV_TX_BUSY;
2517 }
2518 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2519 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002520 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002521
2522 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2523 mac_iocb_ptr->tid = tx_ring_desc->index;
2524 /* We use the upper 32-bits to store the tx queue for this IO.
2525 * When we get the completion we can use it to establish the context.
2526 */
2527 mac_iocb_ptr->txq_idx = tx_ring_idx;
2528 tx_ring_desc->skb = skb;
2529
2530 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2531
2532 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2533 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2534 vlan_tx_tag_get(skb));
2535 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2536 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2537 }
2538 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2539 if (tso < 0) {
2540 dev_kfree_skb_any(skb);
2541 return NETDEV_TX_OK;
2542 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2543 ql_hw_csum_setup(skb,
2544 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2545 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002546 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2547 NETDEV_TX_OK) {
2548 QPRINTK(qdev, TX_QUEUED, ERR,
2549 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002550 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002551 return NETDEV_TX_BUSY;
2552 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002553 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2554 tx_ring->prod_idx++;
2555 if (tx_ring->prod_idx == tx_ring->wq_len)
2556 tx_ring->prod_idx = 0;
2557 wmb();
2558
2559 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002560 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2561 tx_ring->prod_idx, skb->len);
2562
2563 atomic_dec(&tx_ring->tx_count);
2564 return NETDEV_TX_OK;
2565}
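/* Ordering sketch for the transmit path above (informal): the IOCB is
 * built in host memory, wmb() makes those writes globally visible, and
 * only then is the new producer index posted to the doorbell:
 *
 *	fill IOCB -> wmb() -> ql_write_db_reg(prod_idx, db_reg)
 *
 * so the chip can never fetch a half-written descriptor.
 */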
2566
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002567
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002568static void ql_free_shadow_space(struct ql_adapter *qdev)
2569{
2570 if (qdev->rx_ring_shadow_reg_area) {
2571 pci_free_consistent(qdev->pdev,
2572 PAGE_SIZE,
2573 qdev->rx_ring_shadow_reg_area,
2574 qdev->rx_ring_shadow_reg_dma);
2575 qdev->rx_ring_shadow_reg_area = NULL;
2576 }
2577 if (qdev->tx_ring_shadow_reg_area) {
2578 pci_free_consistent(qdev->pdev,
2579 PAGE_SIZE,
2580 qdev->tx_ring_shadow_reg_area,
2581 qdev->tx_ring_shadow_reg_dma);
2582 qdev->tx_ring_shadow_reg_area = NULL;
2583 }
2584}
2585
2586static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2587{
2588 qdev->rx_ring_shadow_reg_area =
2589 pci_alloc_consistent(qdev->pdev,
2590 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2591 if (qdev->rx_ring_shadow_reg_area == NULL) {
2592 QPRINTK(qdev, IFUP, ERR,
2593 "Allocation of RX shadow space failed.\n");
2594 return -ENOMEM;
2595 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002596 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002597 qdev->tx_ring_shadow_reg_area =
2598 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2599 &qdev->tx_ring_shadow_reg_dma);
2600 if (qdev->tx_ring_shadow_reg_area == NULL) {
2601 QPRINTK(qdev, IFUP, ERR,
2602 "Allocation of TX shadow space failed.\n");
2603 goto err_wqp_sh_area;
2604 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002605 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002606 return 0;
2607
2608err_wqp_sh_area:
2609 pci_free_consistent(qdev->pdev,
2610 PAGE_SIZE,
2611 qdev->rx_ring_shadow_reg_area,
2612 qdev->rx_ring_shadow_reg_dma);
2613 return -ENOMEM;
2614}
2615
2616static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2617{
2618 struct tx_ring_desc *tx_ring_desc;
2619 int i;
2620 struct ob_mac_iocb_req *mac_iocb_ptr;
2621
2622 mac_iocb_ptr = tx_ring->wq_base;
2623 tx_ring_desc = tx_ring->q;
2624 for (i = 0; i < tx_ring->wq_len; i++) {
2625 tx_ring_desc->index = i;
2626 tx_ring_desc->skb = NULL;
2627 tx_ring_desc->queue_entry = mac_iocb_ptr;
2628 mac_iocb_ptr++;
2629 tx_ring_desc++;
2630 }
2631 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2632 atomic_set(&tx_ring->queue_stopped, 0);
2633}
2634
2635static void ql_free_tx_resources(struct ql_adapter *qdev,
2636 struct tx_ring *tx_ring)
2637{
2638 if (tx_ring->wq_base) {
2639 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2640 tx_ring->wq_base, tx_ring->wq_base_dma);
2641 tx_ring->wq_base = NULL;
2642 }
2643 kfree(tx_ring->q);
2644 tx_ring->q = NULL;
2645}
2646
2647static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2648 struct tx_ring *tx_ring)
2649{
2650 tx_ring->wq_base =
2651 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2652 &tx_ring->wq_base_dma);
2653
Joe Perches8e95a202009-12-03 07:58:21 +00002654 if ((tx_ring->wq_base == NULL) ||
2655 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002656 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2657 return -ENOMEM;
2658 }
2659 tx_ring->q =
2660 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2661 if (tx_ring->q == NULL)
2662 goto err;
2663
2664 return 0;
2665err:
2666 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2667 tx_ring->wq_base, tx_ring->wq_base_dma);
2668 return -ENOMEM;
2669}
2670
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002671static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002672{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002673 struct bq_desc *lbq_desc;
2674
Ron Mercer7c734352009-10-19 03:32:19 +00002675 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002676
Ron Mercer7c734352009-10-19 03:32:19 +00002677 curr_idx = rx_ring->lbq_curr_idx;
2678 clean_idx = rx_ring->lbq_clean_idx;
2679 while (curr_idx != clean_idx) {
2680 lbq_desc = &rx_ring->lbq[curr_idx];
2681
2682 if (lbq_desc->p.pg_chunk.last_flag) {
2683 pci_unmap_page(qdev->pdev,
2684 lbq_desc->p.pg_chunk.map,
2685 ql_lbq_block_size(qdev),
2686 PCI_DMA_FROMDEVICE);
2687 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002688 }
Ron Mercer7c734352009-10-19 03:32:19 +00002689
2690 put_page(lbq_desc->p.pg_chunk.page);
2691 lbq_desc->p.pg_chunk.page = NULL;
2692
2693 if (++curr_idx == rx_ring->lbq_len)
2694 curr_idx = 0;
2695
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002696 }
2697}
2698
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002699static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002700{
2701 int i;
2702 struct bq_desc *sbq_desc;
2703
2704 for (i = 0; i < rx_ring->sbq_len; i++) {
2705 sbq_desc = &rx_ring->sbq[i];
2706 if (sbq_desc == NULL) {
2707 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2708 return;
2709 }
2710 if (sbq_desc->p.skb) {
2711 pci_unmap_single(qdev->pdev,
2712 pci_unmap_addr(sbq_desc, mapaddr),
2713 pci_unmap_len(sbq_desc, maplen),
2714 PCI_DMA_FROMDEVICE);
2715 dev_kfree_skb(sbq_desc->p.skb);
2716 sbq_desc->p.skb = NULL;
2717 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002718 }
2719}
2720
Ron Mercer4545a3f2009-02-23 10:42:17 +00002721/* Free all large and small rx buffers associated
2722 * with the completion queues for this device.
2723 */
2724static void ql_free_rx_buffers(struct ql_adapter *qdev)
2725{
2726 int i;
2727 struct rx_ring *rx_ring;
2728
2729 for (i = 0; i < qdev->rx_ring_count; i++) {
2730 rx_ring = &qdev->rx_ring[i];
2731 if (rx_ring->lbq)
2732 ql_free_lbq_buffers(qdev, rx_ring);
2733 if (rx_ring->sbq)
2734 ql_free_sbq_buffers(qdev, rx_ring);
2735 }
2736}
2737
2738static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2739{
2740 struct rx_ring *rx_ring;
2741 int i;
2742
2743 for (i = 0; i < qdev->rx_ring_count; i++) {
2744 rx_ring = &qdev->rx_ring[i];
2745 if (rx_ring->type != TX_Q)
2746 ql_update_buffer_queues(qdev, rx_ring);
2747 }
2748}
2749
2750static void ql_init_lbq_ring(struct ql_adapter *qdev,
2751 struct rx_ring *rx_ring)
2752{
2753 int i;
2754 struct bq_desc *lbq_desc;
2755 __le64 *bq = rx_ring->lbq_base;
2756
2757 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2758 for (i = 0; i < rx_ring->lbq_len; i++) {
2759 lbq_desc = &rx_ring->lbq[i];
2760 memset(lbq_desc, 0, sizeof(*lbq_desc));
2761 lbq_desc->index = i;
2762 lbq_desc->addr = bq;
2763 bq++;
2764 }
2765}
2766
2767static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002768 struct rx_ring *rx_ring)
2769{
2770 int i;
2771 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002772 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002773
Ron Mercer4545a3f2009-02-23 10:42:17 +00002774 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002775 for (i = 0; i < rx_ring->sbq_len; i++) {
2776 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002777 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002778 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002779 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002780 bq++;
2781 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002782}
2783
2784static void ql_free_rx_resources(struct ql_adapter *qdev,
2785 struct rx_ring *rx_ring)
2786{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002787 /* Free the small buffer queue. */
2788 if (rx_ring->sbq_base) {
2789 pci_free_consistent(qdev->pdev,
2790 rx_ring->sbq_size,
2791 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2792 rx_ring->sbq_base = NULL;
2793 }
2794
2795 /* Free the small buffer queue control blocks. */
2796 kfree(rx_ring->sbq);
2797 rx_ring->sbq = NULL;
2798
2799 /* Free the large buffer queue. */
2800 if (rx_ring->lbq_base) {
2801 pci_free_consistent(qdev->pdev,
2802 rx_ring->lbq_size,
2803 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2804 rx_ring->lbq_base = NULL;
2805 }
2806
2807 /* Free the large buffer queue control blocks. */
2808 kfree(rx_ring->lbq);
2809 rx_ring->lbq = NULL;
2810
2811 /* Free the rx queue. */
2812 if (rx_ring->cq_base) {
2813 pci_free_consistent(qdev->pdev,
2814 rx_ring->cq_size,
2815 rx_ring->cq_base, rx_ring->cq_base_dma);
2816 rx_ring->cq_base = NULL;
2817 }
2818}
2819
2820/* Allocate queues and buffers for this completions queue based
2821 * on the values in the parameter structure. */
2822static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2823 struct rx_ring *rx_ring)
2824{
2825
2826 /*
2827 * Allocate the completion queue for this rx_ring.
2828 */
2829 rx_ring->cq_base =
2830 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2831 &rx_ring->cq_base_dma);
2832
2833 if (rx_ring->cq_base == NULL) {
2834 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2835 return -ENOMEM;
2836 }
2837
2838 if (rx_ring->sbq_len) {
2839 /*
2840 * Allocate small buffer queue.
2841 */
2842 rx_ring->sbq_base =
2843 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2844 &rx_ring->sbq_base_dma);
2845
2846 if (rx_ring->sbq_base == NULL) {
2847 QPRINTK(qdev, IFUP, ERR,
2848 "Small buffer queue allocation failed.\n");
2849 goto err_mem;
2850 }
2851
2852 /*
2853 * Allocate small buffer queue control blocks.
2854 */
2855 rx_ring->sbq =
2856 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2857 GFP_KERNEL);
2858 if (rx_ring->sbq == NULL) {
2859 QPRINTK(qdev, IFUP, ERR,
2860 "Small buffer queue control block allocation failed.\n");
2861 goto err_mem;
2862 }
2863
Ron Mercer4545a3f2009-02-23 10:42:17 +00002864 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002865 }
2866
2867 if (rx_ring->lbq_len) {
2868 /*
2869 * Allocate large buffer queue.
2870 */
2871 rx_ring->lbq_base =
2872 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2873 &rx_ring->lbq_base_dma);
2874
2875 if (rx_ring->lbq_base == NULL) {
2876 QPRINTK(qdev, IFUP, ERR,
2877 "Large buffer queue allocation failed.\n");
2878 goto err_mem;
2879 }
2880 /*
2881 * Allocate large buffer queue control blocks.
2882 */
2883 rx_ring->lbq =
2884 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2885 GFP_KERNEL);
2886 if (rx_ring->lbq == NULL) {
2887 QPRINTK(qdev, IFUP, ERR,
2888 "Large buffer queue control block allocation failed.\n");
2889 goto err_mem;
2890 }
2891
Ron Mercer4545a3f2009-02-23 10:42:17 +00002892 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002893 }
2894
2895 return 0;
2896
2897err_mem:
2898 ql_free_rx_resources(qdev, rx_ring);
2899 return -ENOMEM;
2900}
2901
2902static void ql_tx_ring_clean(struct ql_adapter *qdev)
2903{
2904 struct tx_ring *tx_ring;
2905 struct tx_ring_desc *tx_ring_desc;
2906 int i, j;
2907
2908 /*
2909 * Loop through all queues and free
2910 * any resources.
2911 */
2912 for (j = 0; j < qdev->tx_ring_count; j++) {
2913 tx_ring = &qdev->tx_ring[j];
2914 for (i = 0; i < tx_ring->wq_len; i++) {
2915 tx_ring_desc = &tx_ring->q[i];
2916 if (tx_ring_desc && tx_ring_desc->skb) {
2917 QPRINTK(qdev, IFDOWN, ERR,
2918 "Freeing lost SKB %p, from queue %d, index %d.\n",
2919 tx_ring_desc->skb, j,
2920 tx_ring_desc->index);
2921 ql_unmap_send(qdev, tx_ring_desc,
2922 tx_ring_desc->map_cnt);
2923 dev_kfree_skb(tx_ring_desc->skb);
2924 tx_ring_desc->skb = NULL;
2925 }
2926 }
2927 }
2928}
2929
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002930static void ql_free_mem_resources(struct ql_adapter *qdev)
2931{
2932 int i;
2933
2934 for (i = 0; i < qdev->tx_ring_count; i++)
2935 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2936 for (i = 0; i < qdev->rx_ring_count; i++)
2937 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2938 ql_free_shadow_space(qdev);
2939}
2940
2941static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2942{
2943 int i;
2944
2945 /* Allocate space for our shadow registers and such. */
2946 if (ql_alloc_shadow_space(qdev))
2947 return -ENOMEM;
2948
2949 for (i = 0; i < qdev->rx_ring_count; i++) {
2950 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2951 QPRINTK(qdev, IFUP, ERR,
2952 "RX resource allocation failed.\n");
2953 goto err_mem;
2954 }
2955 }
2956 /* Allocate tx queue resources */
2957 for (i = 0; i < qdev->tx_ring_count; i++) {
2958 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2959 QPRINTK(qdev, IFUP, ERR,
2960 "TX resource allocation failed.\n");
2961 goto err_mem;
2962 }
2963 }
2964 return 0;
2965
2966err_mem:
2967 ql_free_mem_resources(qdev);
2968 return -ENOMEM;
2969}
2970
2971/* Set up the rx ring control block and pass it to the chip.
2972 * The control block is defined as
2973 * "Completion Queue Initialization Control Block", or cqicb.
2974 */
2975static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2976{
2977 struct cqicb *cqicb = &rx_ring->cqicb;
2978 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002979 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002980 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002981 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002982 void __iomem *doorbell_area =
2983 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2984 int err = 0;
2985 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002986 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002987 __le64 *base_indirect_ptr;
2988 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002989
2990 /* Set up the shadow registers for this ring. */
2991 rx_ring->prod_idx_sh_reg = shadow_reg;
2992 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00002993 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002994 shadow_reg += sizeof(u64);
2995 shadow_reg_dma += sizeof(u64);
2996 rx_ring->lbq_base_indirect = shadow_reg;
2997 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002998 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2999 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003000 rx_ring->sbq_base_indirect = shadow_reg;
3001 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3002
3003 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003004 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003005 rx_ring->cnsmr_idx = 0;
3006 rx_ring->curr_entry = rx_ring->cq_base;
3007
3008 /* PCI doorbell mem area + 0x04 for valid register */
3009 rx_ring->valid_db_reg = doorbell_area + 0x04;
3010
3011 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003012 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003013
3014 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003015 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003016
3017 memset((void *)cqicb, 0, sizeof(struct cqicb));
3018 cqicb->msix_vect = rx_ring->irq;
3019
Ron Mercer459caf52009-01-04 17:08:11 -08003020 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3021 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003022
Ron Mercer97345522009-01-09 11:31:50 +00003023 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003024
Ron Mercer97345522009-01-09 11:31:50 +00003025 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003026
3027 /*
3028 * Set up the control block load flags.
3029 */
3030 cqicb->flags = FLAGS_LC | /* Load queue base address */
3031 FLAGS_LV | /* Load MSI-X vector */
3032 FLAGS_LI; /* Load irq delay values */
3033 if (rx_ring->lbq_len) {
3034 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003035 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003036 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3037 page_entries = 0;
3038 do {
3039 *base_indirect_ptr = cpu_to_le64(tmp);
3040 tmp += DB_PAGE_SIZE;
3041 base_indirect_ptr++;
3042 page_entries++;
3043 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003044 cqicb->lbq_addr =
3045 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003046 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3047 (u16) rx_ring->lbq_buf_size;
3048 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3049 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3050 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003051 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003052 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003053 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003054 rx_ring->lbq_clean_idx = 0;
3055 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003056 }
3057 if (rx_ring->sbq_len) {
3058 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003059 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003060 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3061 page_entries = 0;
3062 do {
3063 *base_indirect_ptr = cpu_to_le64(tmp);
3064 tmp += DB_PAGE_SIZE;
3065 base_indirect_ptr++;
3066 page_entries++;
3067 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003068 cqicb->sbq_addr =
3069 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003070 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003071 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003072 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3073 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003074 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003075 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003077 rx_ring->sbq_clean_idx = 0;
3078 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003079 }
3080 switch (rx_ring->type) {
3081 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003082 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3083 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3084 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003085 case RX_Q:
3086 /* Inbound completion handling rx_rings run in
3087 * separate NAPI contexts.
3088 */
3089 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3090 64);
3091 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3092 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3093 break;
3094 default:
3095 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
3096 rx_ring->type);
3097 }
Ron Mercer49740972009-02-26 10:08:36 +00003098 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003099 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3100 CFG_LCQ, rx_ring->cq_id);
3101 if (err) {
3102 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
3103 return err;
3104 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003105 return err;
3106}
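/* Per-ring doorbell layout as assigned in ql_start_rx_ring() above
 * (offsets within the ring's doorbell page):
 *
 *	+0x00	completion queue consumer index
 *	+0x04	valid register
 *	+0x18	large buffer queue producer index
 *	+0x1c	small buffer queue producer index
 */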
3107
3108static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3109{
3110 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3111 void __iomem *doorbell_area =
3112 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3113 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3114 (tx_ring->wq_id * sizeof(u64));
3115 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3116 (tx_ring->wq_id * sizeof(u64));
3117 int err = 0;
3118
3119 /*
3120 * Assign doorbell registers for this tx_ring.
3121 */
3122 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003123 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003124 tx_ring->prod_idx = 0;
3125 /* TX PCI doorbell mem area + 0x04 */
3126 tx_ring->valid_db_reg = doorbell_area + 0x04;
3127
3128 /*
3129 * Assign shadow registers for this tx_ring.
3130 */
3131 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3132 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3133
3134 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3135 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3136 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3137 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3138 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003139 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003140
Ron Mercer97345522009-01-09 11:31:50 +00003141 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142
3143 ql_init_tx_ring(qdev, tx_ring);
3144
Ron Mercere3324712009-07-02 06:06:13 +00003145 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003146 (u16) tx_ring->wq_id);
3147 if (err) {
3148 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
3149 return err;
3150 }
Ron Mercer49740972009-02-26 10:08:36 +00003151 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003152 return err;
3153}
3154
3155static void ql_disable_msix(struct ql_adapter *qdev)
3156{
3157 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3158 pci_disable_msix(qdev->pdev);
3159 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3160 kfree(qdev->msi_x_entry);
3161 qdev->msi_x_entry = NULL;
3162 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3163 pci_disable_msi(qdev->pdev);
3164 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3165 }
3166}
3167
Ron Mercera4ab6132009-08-27 11:02:10 +00003168/* We start by trying to get the number of vectors
3169 * stored in qdev->intr_count. If we don't get that
3170 * many then we reduce the count and try again.
3171 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003172static void ql_enable_msix(struct ql_adapter *qdev)
3173{
Ron Mercera4ab6132009-08-27 11:02:10 +00003174 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003175
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003176 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003177 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003178 /* Try to alloc space for the msix struct,
3179 * if it fails then go to MSI/legacy.
3180 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003181 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003182 sizeof(struct msix_entry),
3183 GFP_KERNEL);
3184 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003185 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003186 goto msi;
3187 }
3188
Ron Mercera4ab6132009-08-27 11:02:10 +00003189 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003190 qdev->msi_x_entry[i].entry = i;
3191
Ron Mercera4ab6132009-08-27 11:02:10 +00003192 /* Loop to get our vectors. We start with
3193 * what we want and settle for what we get.
3194 */
3195 do {
3196 err = pci_enable_msix(qdev->pdev,
3197 qdev->msi_x_entry, qdev->intr_count);
3198 if (err > 0)
3199 qdev->intr_count = err;
3200 } while (err > 0);
3201
3202 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003203 kfree(qdev->msi_x_entry);
3204 qdev->msi_x_entry = NULL;
3205 QPRINTK(qdev, IFUP, WARNING,
3206 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003207 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003208 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003209 } else if (err == 0) {
3210 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3211 QPRINTK(qdev, IFUP, INFO,
3212 "MSI-X Enabled, got %d vectors.\n",
3213 qdev->intr_count);
3214 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003215 }
3216 }
3217msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003218 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003219 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003220 if (!pci_enable_msi(qdev->pdev)) {
3221 set_bit(QL_MSI_ENABLED, &qdev->flags);
3222 QPRINTK(qdev, IFUP, INFO,
3223 "Running with MSI interrupts.\n");
3224 return;
3225 }
3226 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003227 qlge_irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003228 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
3229}
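/* A condensed sketch of the negotiation loop above (illustrative only;
 * the helper name is hypothetical). pci_enable_msix() returns 0 on
 * success, a positive count of vectors actually available, or a
 * negative errno, so retrying with the returned count converges; e.g.
 * asking for 8 when only 5 exist yields 5 on the first call and 0 on
 * the retry.
 */
static inline int ql_example_negotiate_msix(struct pci_dev *pdev,
					    struct msix_entry *entries,
					    int want)
{
	int err;

	while ((err = pci_enable_msix(pdev, entries, want)) > 0)
		want = err;	/* settle for what the bus offers */

	return err ? err : want;	/* errno, or vectors granted */
}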
3230
Ron Mercer39aa8162009-08-27 11:02:11 +00003231/* Each vector services 1 RSS ring and 1 or more
3232 * TX completion rings. This function loops through
3233 * the TX completion rings and assigns the vector that
3234 * will service it. An example would be if there are
3235 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3236 * This would mean that vector 0 would service RSS ring 0
3237 * and TX completion rings 0,1,2 and 3. Vector 1 would
3238 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3239 */
3240static void ql_set_tx_vect(struct ql_adapter *qdev)
3241{
3242 int i, j, vect;
3243 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3244
3245 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3246 /* Assign irq vectors to TX completion rx_rings. */
3247 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3248 i < qdev->rx_ring_count; i++) {
3249 if (j == tx_rings_per_vector) {
3250 vect++;
3251 j = 0;
3252 }
3253 qdev->rx_ring[i].irq = vect;
3254 j++;
3255 }
3256 } else {
3257 /* For single vector all rings have an irq
3258 * of zero.
3259 */
3260 for (i = 0; i < qdev->rx_ring_count; i++)
3261 qdev->rx_ring[i].irq = 0;
3262 }
3263}
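/* Illustrative helper (hypothetical, not used by the driver): given the
 * assignment loop above, the vector for the n'th TX completion ring is
 * simply n / tx_rings_per_vector. For the example in the comment (2
 * vectors, 8 TX completion rings), rings 0-3 land on vector 0 and
 * rings 4-7 on vector 1.
 */
static inline u32 ql_example_tx_vect(u32 n, u32 tx_rings_per_vector)
{
	return n / tx_rings_per_vector;
}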
3264
3265/* Set the interrupt mask for this vector. Each vector
3266 * will service 1 RSS ring and 1 or more TX completion
3267 * rings. This function sets up a bit mask per vector
3268 * that indicates which rings it services.
3269 */
3270static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3271{
3272 int j, vect = ctx->intr;
3273 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3274
3275 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3276 /* Add the RSS ring serviced by this vector
3277 * to the mask.
3278 */
3279 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3280 /* Add the TX ring(s) serviced by this vector
3281 * to the mask. */
3282 for (j = 0; j < tx_rings_per_vector; j++) {
3283 ctx->irq_mask |=
3284 (1 << qdev->rx_ring[qdev->rss_ring_count +
3285 (vect * tx_rings_per_vector) + j].cq_id);
3286 }
3287 } else {
3288 /* For single vector we just shift each queue's
3289 * ID into the mask.
3290 */
3291 for (j = 0; j < qdev->rx_ring_count; j++)
3292 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3293 }
3294}
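/* Worked example for the mask built above (illustrative): with 2
 * vectors and 8 TX completion rings, tx_rings_per_vector = 4. Vector 1
 * services RSS ring 1 (cq_id 1) plus the TX completion rings at cq_ids
 * 6, 7, 8 and 9, so its irq_mask is
 * (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) = 0x3c2.
 */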
3295
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003296/*
3297 * Here we build the intr_context structures based on
3298 * our rx_ring count and intr vector count.
3299 * The intr_context structure is used to hook each vector
3300 * to possibly different handlers.
3301 */
3302static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3303{
3304 int i = 0;
3305 struct intr_context *intr_context = &qdev->intr_context[0];
3306
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003307 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3308 /* Each rx_ring has its
3309 * own intr_context since we have separate
3310 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003311 */
3312 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3313 qdev->rx_ring[i].irq = i;
3314 intr_context->intr = i;
3315 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003316 /* Set up this vector's bit-mask that indicates
3317 * which queues it services.
3318 */
3319 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003320 /*
3321 * We set up each vector's enable/disable/read bits so
3322 * there are no bit/mask calculations in the critical path.
3323 */
3324 intr_context->intr_en_mask =
3325 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3326 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3327 | i;
3328 intr_context->intr_dis_mask =
3329 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3330 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3331 INTR_EN_IHD | i;
3332 intr_context->intr_read_mask =
3333 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3334 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3335 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003336 if (i == 0) {
3337 /* The first vector/queue handles
3338 * broadcast/multicast, fatal errors,
3339 * and firmware events. This is in addition
3340 * to normal inbound NAPI processing.
3341 */
3342 intr_context->handler = qlge_isr;
3343 sprintf(intr_context->name, "%s-rx-%d",
3344 qdev->ndev->name, i);
3345 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003346 /*
3347 * Inbound queues handle unicast frames only.
3348 */
3349 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003350 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003351 qdev->ndev->name, i);
3352 }
3353 }
3354 } else {
3355 /*
3356 * All rx_rings use the same intr_context since
3357 * there is only one vector.
3358 */
3359 intr_context->intr = 0;
3360 intr_context->qdev = qdev;
3361 /*
3362 * We set up each vector's enable/disable/read bits so
3363 * there are no bit/mask calculations in the critical path.
3364 */
3365 intr_context->intr_en_mask =
3366 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3367 intr_context->intr_dis_mask =
3368 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3369 INTR_EN_TYPE_DISABLE;
3370 intr_context->intr_read_mask =
3371 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3372 /*
3373 * Single interrupt means one handler for all rings.
3374 */
3375 intr_context->handler = qlge_isr;
3376 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003377 /* Set up this vector's bit-mask that indicates
3378 * which queues it services. In this case there is
3379 * a single vector so it will service all RSS and
3380 * TX completion rings.
3381 */
3382 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003383 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003384 /* Tell the TX completion rings which MSIx vector
3385 * they will be using.
3386 */
3387 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003388}
3389
3390static void ql_free_irq(struct ql_adapter *qdev)
3391{
3392 int i;
3393 struct intr_context *intr_context = &qdev->intr_context[0];
3394
3395 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3396 if (intr_context->hooked) {
3397 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3398 free_irq(qdev->msi_x_entry[i].vector,
3399 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00003400 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003401 "freeing msix interrupt %d.\n", i);
3402 } else {
3403 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00003404 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003405 "freeing msi interrupt %d.\n", i);
3406 }
3407 }
3408 }
3409 ql_disable_msix(qdev);
3410}
3411
3412static int ql_request_irq(struct ql_adapter *qdev)
3413{
3414 int i;
3415 int status = 0;
3416 struct pci_dev *pdev = qdev->pdev;
3417 struct intr_context *intr_context = &qdev->intr_context[0];
3418
3419 ql_resolve_queues_to_irqs(qdev);
3420
3421 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3422 atomic_set(&intr_context->irq_cnt, 0);
3423 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3424 status = request_irq(qdev->msi_x_entry[i].vector,
3425 intr_context->handler,
3426 0,
3427 intr_context->name,
3428 &qdev->rx_ring[i]);
3429 if (status) {
3430 QPRINTK(qdev, IFUP, ERR,
3431 "Failed request for MSIX interrupt %d.\n",
3432 i);
3433 goto err_irq;
3434 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003435 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003436 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3437 i,
3438 qdev->rx_ring[i].type ==
3439 DEFAULT_Q ? "DEFAULT_Q" : "",
3440 qdev->rx_ring[i].type ==
3441 TX_Q ? "TX_Q" : "",
3442 qdev->rx_ring[i].type ==
3443 RX_Q ? "RX_Q" : "", intr_context->name);
3444 }
3445 } else {
3446 QPRINTK(qdev, IFUP, DEBUG,
3447 "trying msi or legacy interrupts.\n");
3448 QPRINTK(qdev, IFUP, DEBUG,
3449 "%s: irq = %d.\n", __func__, pdev->irq);
3450 QPRINTK(qdev, IFUP, DEBUG,
3451 "%s: context->name = %s.\n", __func__,
3452 intr_context->name);
3453 QPRINTK(qdev, IFUP, DEBUG,
3454 "%s: dev_id = 0x%p.\n", __func__,
3455 &qdev->rx_ring[0]);
3456 status =
3457 request_irq(pdev->irq, qlge_isr,
3458 test_bit(QL_MSI_ENABLED,
3459 &qdev->flags) ? 0 : IRQF_SHARED,
3461 intr_context->name, &qdev->rx_ring[0]);
3462 if (status)
3463 goto err_irq;
3464
3465 QPRINTK(qdev, IFUP, DEBUG,
3466 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3467 i,
3468 qdev->rx_ring[0].type ==
3469 DEFAULT_Q ? "DEFAULT_Q" : "",
3470 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3471 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3472 intr_context->name);
3473 }
3474 intr_context->hooked = 1;
3475 }
3476 return status;
3477err_irq:
3478 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3479 ql_free_irq(qdev);
3480 return status;
3481}
3482
3483static int ql_start_rss(struct ql_adapter *qdev)
3484{
Ron Mercer541ae282009-10-08 09:54:37 +00003485 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3486 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3487 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3488 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3489 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3490 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003491 struct ricb *ricb = &qdev->ricb;
3492 int status = 0;
3493 int i;
3494 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3495
Ron Mercere3324712009-07-02 06:06:13 +00003496 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003497
Ron Mercerb2014ff2009-08-27 11:02:09 +00003498 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003499 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003500 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3501 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003502
3503 /*
3504 * Fill out the Indirection Table.
3505 */
Ron Mercer541ae282009-10-08 09:54:37 +00003506 for (i = 0; i < 1024; i++)
3507 hash_id[i] = (i & (qdev->rss_ring_count - 1));
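	/* Illustrative example: assuming rss_ring_count is a power of
	 * two (say 4), the AND above is equivalent to i % rss_ring_count
	 * and the 1024-entry table repeats 0,1,2,3,... so hash results
	 * are spread round-robin across the inbound completion queues.
	 */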
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003508
Ron Mercer541ae282009-10-08 09:54:37 +00003509 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3510 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003511
Ron Mercer49740972009-02-26 10:08:36 +00003512 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003513
Ron Mercere3324712009-07-02 06:06:13 +00003514 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003515 if (status) {
3516 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3517 return status;
3518 }
Ron Mercer49740972009-02-26 10:08:36 +00003519 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003520 return status;
3521}
3522
Ron Mercera5f59dc2009-07-02 06:06:07 +00003523static int ql_clear_routing_entries(struct ql_adapter *qdev)
3524{
3525 int i, status = 0;
3526
3527 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3528 if (status)
3529 return status;
3530 /* Clear all the entries in the routing table. */
3531 for (i = 0; i < 16; i++) {
3532 status = ql_set_routing_reg(qdev, i, 0, 0);
3533 if (status) {
3534 QPRINTK(qdev, IFUP, ERR,
3535 "Failed to init routing register for CAM "
3536 "packets.\n");
3537 break;
3538 }
3539 }
3540 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3541 return status;
3542}
3543
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003544/* Initialize the frame-to-queue routing. */
3545static int ql_route_initialize(struct ql_adapter *qdev)
3546{
3547 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003548
3549 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003550 status = ql_clear_routing_entries(qdev);
3551 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003552 return status;
3553
3554 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3555 if (status)
3556 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003557
3558 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3559 if (status) {
3560 QPRINTK(qdev, IFUP, ERR,
3561 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003562 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 }
3564 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3565 if (status) {
3566 QPRINTK(qdev, IFUP, ERR,
3567 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003568 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003569 }
3570 /* If we have more than one inbound queue, then turn on RSS in the
3571 * routing block.
3572 */
3573 if (qdev->rss_ring_count > 1) {
3574 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3575 RT_IDX_RSS_MATCH, 1);
3576 if (status) {
3577 QPRINTK(qdev, IFUP, ERR,
3578 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003579 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003580 }
3581 }
3582
3583 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3584 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003585 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586 QPRINTK(qdev, IFUP, ERR,
3587 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003588exit:
3589 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003590 return status;
3591}
3592
Ron Mercer2ee1e272009-03-03 12:10:33 +00003593int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003594{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003595 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003596
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003597 /* Check if the link is up and use that to
3598 * determine if we are setting or clearing
3599 * the MAC address in the CAM.
3600 */
3601 set = ql_read32(qdev, STS);
3602 set &= qdev->port_link_up;
3603 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003604 if (status) {
3605 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3606 return status;
3607 }
3608
3609 status = ql_route_initialize(qdev);
3610 if (status)
3611 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3612
3613 return status;
3614}
3615
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003616static int ql_adapter_initialize(struct ql_adapter *qdev)
3617{
3618 u32 value, mask;
3619 int i;
3620 int status = 0;
3621
3622 /*
3623 * Set up the System register to halt on errors.
3624 */
3625 value = SYS_EFE | SYS_FAE;
3626 mask = value << 16;
3627 ql_write32(qdev, SYS, mask | value);
3628
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003629 /* Set the default queue, and VLAN behavior. */
3630 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3631 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003632 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3633
3634 /* Set the MPI interrupt to enabled. */
3635 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3636
3637 /* Enable the function, set pagesize, enable error checking. */
3638 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003639 FSC_EC | FSC_VM_PAGE_4K;
3640 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003641
3642 /* Set/clear header splitting. */
3643 mask = FSC_VM_PAGESIZE_MASK |
3644 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3645 ql_write32(qdev, FSC, mask | value);
3646
Ron Mercer572c5262010-01-02 10:37:42 +00003647 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648
Ron Mercera3b71932009-10-08 09:54:38 +00003649 /* Set RX packet routing to use port/pci function on which the
3650 * packet arrived, in addition to usual frame routing.
3651 * This is helpful on bonding where both interfaces can have
3652 * the same MAC address.
3653 */
3654 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003655 /* Reroute all packets to our Interface.
3656 * They may have been routed to MPI firmware
3657 * due to WOL.
3658 */
3659 value = ql_read32(qdev, MGMT_RCV_CFG);
3660 value &= ~MGMT_RCV_CFG_RM;
3661 mask = 0xffff0000;
3662
3663 /* Sticky reg needs clearing due to WOL. */
3664 ql_write32(qdev, MGMT_RCV_CFG, mask);
3665 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3666
3667 /* Default WOL is enabled on Mezz cards */
3668 if (qdev->pdev->subsystem_device == 0x0068 ||
3669 qdev->pdev->subsystem_device == 0x0180)
3670 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003671
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003672 /* Start up the rx queues. */
3673 for (i = 0; i < qdev->rx_ring_count; i++) {
3674 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3675 if (status) {
3676 QPRINTK(qdev, IFUP, ERR,
3677 "Failed to start rx ring[%d].\n", i);
3678 return status;
3679 }
3680 }
3681
3682 /* If there is more than one inbound completion queue
3683 * then download a RICB to configure RSS.
3684 */
3685 if (qdev->rss_ring_count > 1) {
3686 status = ql_start_rss(qdev);
3687 if (status) {
3688 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3689 return status;
3690 }
3691 }
3692
3693 /* Start up the tx queues. */
3694 for (i = 0; i < qdev->tx_ring_count; i++) {
3695 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3696 if (status) {
3697 QPRINTK(qdev, IFUP, ERR,
3698 "Failed to start tx ring[%d].\n", i);
3699 return status;
3700 }
3701 }
3702
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003703 /* Initialize the port and set the max framesize. */
3704 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003705 if (status)
3706 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003707
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003708 /* Set up the MAC address and frame routing filter. */
3709 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003710 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003711 QPRINTK(qdev, IFUP, ERR,
3712 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003713 return status;
3714 }
3715
3716 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003717 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003718 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003719 i);
3720 napi_enable(&qdev->rx_ring[i].napi);
3721 }
3722
3723 return status;
3724}
3725
3726/* Issue soft reset to chip. */
3727static int ql_adapter_reset(struct ql_adapter *qdev)
3728{
3729 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003730 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003731 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003732
Ron Mercera5f59dc2009-07-02 06:06:07 +00003733 /* Clear all the entries in the routing table. */
3734 status = ql_clear_routing_entries(qdev);
3735 if (status) {
3736 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3737 return status;
3738 }
3739
3740 end_jiffies = jiffies +
3741 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003742
3743 /* Stop management traffic. */
3744 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3745
3746 /* Wait for the NIC and MGMNT FIFOs to empty. */
3747 ql_wait_fifo_empty(qdev);
3748
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003749 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003750
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003751 do {
3752 value = ql_read32(qdev, RST_FO);
3753 if ((value & RST_FO_FR) == 0)
3754 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003755 cpu_relax();
3756 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003757
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003758 if (value & RST_FO_FR) {
3759 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003760 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003761 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003762 }
3763
Ron Mercer84087f42009-10-08 09:54:41 +00003764 /* Resume management traffic. */
3765 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003766 return status;
3767}
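/* The poll loop above condensed into a hypothetical helper, for
 * illustration only. Note that usecs_to_jiffies(30) rounds up to one
 * jiffy on common HZ settings, so the reset bit is polled for roughly
 * a single tick before ETIMEDOUT is declared.
 */
static inline int ql_example_wait_reset_done(struct ql_adapter *qdev,
					     unsigned long timeout)
{
	unsigned long end = jiffies + timeout;

	do {
		if (!(ql_read32(qdev, RST_FO) & RST_FO_FR))
			return 0;	/* function reset bit cleared */
		cpu_relax();
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}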
3768
3769static void ql_display_dev_info(struct net_device *ndev)
3770{
3771 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3772
3773 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003774 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003775 "XG Roll = %d, XG Rev = %d.\n",
3776 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003777 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003778 qdev->chip_rev_id & 0x0000000f,
3779 qdev->chip_rev_id >> 4 & 0x0000000f,
3780 qdev->chip_rev_id >> 8 & 0x0000000f,
3781 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003782 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003783}
3784
Ron Mercerbc083ce2009-10-21 11:07:40 +00003785int ql_wol(struct ql_adapter *qdev)
3786{
3787 int status = 0;
3788 u32 wol = MB_WOL_DISABLE;
3789
3790 /* The CAM is still intact after a reset, but if we
3791 * are doing WOL, then we may need to program the
3792 * routing regs. We would also need to issue the mailbox
3793 * commands to instruct the MPI what to do per the ethtool
3794 * settings.
3795 */
3796
3797 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3798 WAKE_MCAST | WAKE_BCAST)) {
3799 QPRINTK(qdev, IFDOWN, ERR,
3800 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3801 qdev->wol);
3802 return -EINVAL;
3803 }
3804
3805 if (qdev->wol & WAKE_MAGIC) {
3806 status = ql_mb_wol_set_magic(qdev, 1);
3807 if (status) {
3808 QPRINTK(qdev, IFDOWN, ERR,
3809 "Failed to set magic packet on %s.\n",
3810 qdev->ndev->name);
3811 return status;
3812 } else
3813 QPRINTK(qdev, DRV, INFO,
3814 "Enabled magic packet successfully on %s.\n",
3815 qdev->ndev->name);
3816
3817 wol |= MB_WOL_MAGIC_PKT;
3818 }
3819
3820 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003821 wol |= MB_WOL_MODE_ON;
3822 status = ql_mb_wol_mode(qdev, wol);
3823 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3824 (status == 0) ? "Successfully set" : "Failed", wol,
3825 qdev->ndev->name);
3826 }
3827
3828 return status;
3829}
3830
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003831static int ql_adapter_down(struct ql_adapter *qdev)
3832{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003833 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003834
Ron Mercer6a473302009-07-02 06:06:12 +00003835 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003836
Ron Mercer6497b602009-02-12 16:37:13 -08003837 /* Don't kill the reset worker thread if we
3838 * are in the process of recovery.
3839 */
3840 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3841 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003842 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3843 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003844 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003845 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003846
Ron Mercer39aa8162009-08-27 11:02:11 +00003847 for (i = 0; i < qdev->rss_ring_count; i++)
3848 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003849
3850 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3851
3852 ql_disable_interrupts(qdev);
3853
3854 ql_tx_ring_clean(qdev);
3855
Ron Mercer6b318cb2009-03-09 10:59:26 +00003856 /* Call netif_napi_del() from common point.
3857 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003858 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003859 netif_napi_del(&qdev->rx_ring[i].napi);
3860
Ron Mercer4545a3f2009-02-23 10:42:17 +00003861 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003862
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003863 status = ql_adapter_reset(qdev);
3864 if (status)
3865 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3866 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003867 return status;
3868}
3869
3870static int ql_adapter_up(struct ql_adapter *qdev)
3871{
3872 int err = 0;
3873
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003874 err = ql_adapter_initialize(qdev);
3875 if (err) {
3876 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003877 goto err_init;
3878 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003879 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003880 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003881 /* If the port is initialized and the
3882 * link is up then turn on the carrier.
3883 */
3884 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3885 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003886 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003887 ql_enable_interrupts(qdev);
3888 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003889 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003890
3891 return 0;
3892err_init:
3893 ql_adapter_reset(qdev);
3894 return err;
3895}
3896
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003897static void ql_release_adapter_resources(struct ql_adapter *qdev)
3898{
3899 ql_free_mem_resources(qdev);
3900 ql_free_irq(qdev);
3901}
3902
3903static int ql_get_adapter_resources(struct ql_adapter *qdev)
3904{
3905 int status = 0;
3906
3907 if (ql_alloc_mem_resources(qdev)) {
3908 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3909 return -ENOMEM;
3910 }
3911 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003912 return status;
3913}
3914
3915static int qlge_close(struct net_device *ndev)
3916{
3917 struct ql_adapter *qdev = netdev_priv(ndev);
3918
3919 /*
3920 * Wait for device to recover from a reset.
3921 * (Rarely happens, but possible.)
3922 */
3923 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3924 msleep(1);
3925 ql_adapter_down(qdev);
3926 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003927 return 0;
3928}
3929
3930static int ql_configure_rings(struct ql_adapter *qdev)
3931{
3932 int i;
3933 struct rx_ring *rx_ring;
3934 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003935 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003936 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3937 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3938
3939 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003940
Ron Mercera4ab6132009-08-27 11:02:10 +00003941 /* In a perfect world we have one RSS ring for each CPU
3942 * and each has its own vector. To do that we ask for
3943 * cpu_cnt vectors. ql_enable_msix() will adjust the
3944 * vector count to what we actually get. We then
3945 * allocate an RSS ring for each.
3946 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003947 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003948 qdev->intr_count = cpu_cnt;
3949 ql_enable_msix(qdev);
3950 /* Adjust the RSS ring count to the actual vector count. */
3951 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003952 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003953 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003955 for (i = 0; i < qdev->tx_ring_count; i++) {
3956 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003957 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003958 tx_ring->qdev = qdev;
3959 tx_ring->wq_id = i;
3960 tx_ring->wq_len = qdev->tx_ring_size;
3961 tx_ring->wq_size =
3962 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3963
3964 /*
3965 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003966 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003967 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003968 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003969 }
3970
3971 for (i = 0; i < qdev->rx_ring_count; i++) {
3972 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003973 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003974 rx_ring->qdev = qdev;
3975 rx_ring->cq_id = i;
3976 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003977 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003978 /*
3979 * Inbound (RSS) queues.
3980 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003981 rx_ring->cq_len = qdev->rx_ring_size;
3982 rx_ring->cq_size =
3983 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3984 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3985 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003986 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00003987 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3988 QPRINTK(qdev, IFUP, DEBUG,
3989 "lbq_buf_size %d, order = %d\n",
3990 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003991 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3992 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003993 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00003994 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003995 rx_ring->type = RX_Q;
3996 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003997 /*
3998 * Outbound queue handles outbound completions only.
3999 */
4000 /* outbound cq is same size as tx_ring it services. */
4001 rx_ring->cq_len = qdev->tx_ring_size;
4002 rx_ring->cq_size =
4003 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4004 rx_ring->lbq_len = 0;
4005 rx_ring->lbq_size = 0;
4006 rx_ring->lbq_buf_size = 0;
4007 rx_ring->sbq_len = 0;
4008 rx_ring->sbq_size = 0;
4009 rx_ring->sbq_buf_size = 0;
4010 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004011 }
4012 }
4013 return 0;
4014}
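/* Worked example of the ring accounting above (illustrative): on a
 * 4-CPU box that is granted all 4 MSI-X vectors, intr_count =
 * rss_ring_count = tx_ring_count = 4 and rx_ring_count = 8.
 * rx_ring[0..3] are inbound RSS queues (cq_ids 0-3), rx_ring[4..7] are
 * TX completion queues (cq_ids 4-7), and tx_ring[i] completes on
 * cq_id 4 + i.
 */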
4015
4016static int qlge_open(struct net_device *ndev)
4017{
4018 int err = 0;
4019 struct ql_adapter *qdev = netdev_priv(ndev);
4020
Ron Mercer74e12432009-11-11 12:54:04 +00004021 err = ql_adapter_reset(qdev);
4022 if (err)
4023 return err;
4024
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004025 err = ql_configure_rings(qdev);
4026 if (err)
4027 return err;
4028
4029 err = ql_get_adapter_resources(qdev);
4030 if (err)
4031 goto error_up;
4032
4033 err = ql_adapter_up(qdev);
4034 if (err)
4035 goto error_up;
4036
4037 return err;
4038
4039error_up:
4040 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004041 return err;
4042}
4043
Ron Mercer7c734352009-10-19 03:32:19 +00004044static int ql_change_rx_buffers(struct ql_adapter *qdev)
4045{
4046 struct rx_ring *rx_ring;
4047 int i, status;
4048 u32 lbq_buf_len;
4049
4050 /* Wait for an outstanding reset to complete. */
4051 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4052 int i = 3;
4053 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4054 QPRINTK(qdev, IFUP, ERR,
4055 "Waiting for adapter UP...\n");
4056 ssleep(1);
4057 }
4058
4059 if (!i) {
4060 QPRINTK(qdev, IFUP, ERR,
4061 "Timed out waiting for adapter UP\n");
4062 return -ETIMEDOUT;
4063 }
4064 }
4065
4066 status = ql_adapter_down(qdev);
4067 if (status)
4068 goto error;
4069
4070 /* Get the new rx buffer size. */
4071 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4072 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4073 qdev->lbq_buf_order = get_order(lbq_buf_len);
4074
4075 for (i = 0; i < qdev->rss_ring_count; i++) {
4076 rx_ring = &qdev->rx_ring[i];
4077 /* Set the new size. */
4078 rx_ring->lbq_buf_size = lbq_buf_len;
4079 }
4080
4081 status = ql_adapter_up(qdev);
4082 if (status)
4083 goto error;
4084
4085 return status;
4086error:
4087 QPRINTK(qdev, IFUP, ALERT,
4088 "Driver up/down cycle failed, closing device.\n");
4089 set_bit(QL_ADAPTER_UP, &qdev->flags);
4090 dev_close(qdev->ndev);
4091 return status;
4092}
4093
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004094static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4095{
4096 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004097 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004098
4099 if (ndev->mtu == 1500 && new_mtu == 9000) {
4100 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
4101 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4102 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
4103 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
4104 (ndev->mtu == 9000 && new_mtu == 9000)) {
4105 return 0;
4106 } else
4107 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004108
4109 queue_delayed_work(qdev->workqueue,
4110 &qdev->mpi_port_cfg_work, 3*HZ);
4111
4112 if (!netif_running(qdev->ndev)) {
4113 ndev->mtu = new_mtu;
4114 return 0;
4115 }
4116
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004117 ndev->mtu = new_mtu;
Ron Mercer7c734352009-10-19 03:32:19 +00004118 status = ql_change_rx_buffers(qdev);
4119 if (status) {
4120 QPRINTK(qdev, IFUP, ERR,
4121 "Changing MTU failed.\n");
4122 }
4123
4124 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004125}
4126
4127static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4129{
Ron Mercer885ee392009-11-03 13:49:31 +00004130 struct ql_adapter *qdev = netdev_priv(ndev);
4131 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4132 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4133 unsigned long pkts, mcast, dropped, errors, bytes;
4134 int i;
4135
4136 /* Get RX stats. */
4137 pkts = mcast = dropped = errors = bytes = 0;
4138 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4139 pkts += rx_ring->rx_packets;
4140 bytes += rx_ring->rx_bytes;
4141 dropped += rx_ring->rx_dropped;
4142 errors += rx_ring->rx_errors;
4143 mcast += rx_ring->rx_multicast;
4144 }
4145 ndev->stats.rx_packets = pkts;
4146 ndev->stats.rx_bytes = bytes;
4147 ndev->stats.rx_dropped = dropped;
4148 ndev->stats.rx_errors = errors;
4149 ndev->stats.multicast = mcast;
4150
4151 /* Get TX stats. */
4152 pkts = errors = bytes = 0;
4153 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4154 pkts += tx_ring->tx_packets;
4155 bytes += tx_ring->tx_bytes;
4156 errors += tx_ring->tx_errors;
4157 }
4158 ndev->stats.tx_packets = pkts;
4159 ndev->stats.tx_bytes = bytes;
4160 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004161 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004162}
4163
4164static void qlge_set_multicast_list(struct net_device *ndev)
4165{
4166 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4167 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00004168 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004169
Ron Mercercc288f52009-02-23 10:42:14 +00004170 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4171 if (status)
4172 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004173 /*
4174 * Set or clear promiscuous mode if a
4175 * transition is taking place.
4176 */
4177 if (ndev->flags & IFF_PROMISC) {
4178 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4179 if (ql_set_routing_reg
4180 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4181 QPRINTK(qdev, HW, ERR,
4182 "Failed to set promiscous mode.\n");
4183 } else {
4184 set_bit(QL_PROMISCUOUS, &qdev->flags);
4185 }
4186 }
4187 } else {
4188 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4189 if (ql_set_routing_reg
4190 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4191 QPRINTK(qdev, HW, ERR,
4192 "Failed to clear promiscous mode.\n");
4193 } else {
4194 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4195 }
4196 }
4197 }
4198
4199 /*
4200 * Set or clear all multicast mode if a
4201 * transition is taking place.
4202 */
4203 if ((ndev->flags & IFF_ALLMULTI) ||
4204 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
4205 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4206 if (ql_set_routing_reg
4207 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4208 QPRINTK(qdev, HW, ERR,
4209 "Failed to set all-multi mode.\n");
4210 } else {
4211 set_bit(QL_ALLMULTI, &qdev->flags);
4212 }
4213 }
4214 } else {
4215 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4216 if (ql_set_routing_reg
4217 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4218 QPRINTK(qdev, HW, ERR,
4219 "Failed to clear all-multi mode.\n");
4220 } else {
4221 clear_bit(QL_ALLMULTI, &qdev->flags);
4222 }
4223 }
4224 }
4225
4226 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00004227 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4228 if (status)
4229 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004230 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4231 i++, mc_ptr = mc_ptr->next)
4232 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4233 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4234 QPRINTK(qdev, HW, ERR,
4235 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004236 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004237 goto exit;
4238 }
Ron Mercercc288f52009-02-23 10:42:14 +00004239 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004240 if (ql_set_routing_reg
4241 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4242 QPRINTK(qdev, HW, ERR,
4243 "Failed to set multicast match mode.\n");
4244 } else {
4245 set_bit(QL_ALLMULTI, &qdev->flags);
4246 }
4247 }
4248exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004249 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004250}
4251
4252static int qlge_set_mac_address(struct net_device *ndev, void *p)
4253{
4254 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4255 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004256 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004257
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004258 if (!is_valid_ether_addr(addr->sa_data))
4259 return -EADDRNOTAVAIL;
4260 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4261
Ron Mercercc288f52009-02-23 10:42:14 +00004262 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4263 if (status)
4264 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004265 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4266 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004267 if (status)
4268 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
4269 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4270 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004271}
4272
4273static void qlge_tx_timeout(struct net_device *ndev)
4274{
4275 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004276 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004277}
4278
4279static void ql_asic_reset_work(struct work_struct *work)
4280{
4281 struct ql_adapter *qdev =
4282 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004283 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004284 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004285 status = ql_adapter_down(qdev);
4286 if (status)
4287 goto error;
4288
4289 status = ql_adapter_up(qdev);
4290 if (status)
4291 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004292
4293 /* Restore rx mode. */
4294 clear_bit(QL_ALLMULTI, &qdev->flags);
4295 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4296 qlge_set_multicast_list(qdev->ndev);
4297
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004298 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004299 return;
4300error:
4301 QPRINTK(qdev, IFUP, ALERT,
4302 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004303
Ron Mercerdb988122009-03-09 10:59:17 +00004304 set_bit(QL_ADAPTER_UP, &qdev->flags);
4305 dev_close(qdev->ndev);
4306 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004307}
4308
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004309static struct nic_operations qla8012_nic_ops = {
4310 .get_flash = ql_get_8012_flash_params,
4311 .port_initialize = ql_8012_port_initialize,
4312};
4313
Ron Mercercdca8d02009-03-02 08:07:31 +00004314static struct nic_operations qla8000_nic_ops = {
4315 .get_flash = ql_get_8000_flash_params,
4316 .port_initialize = ql_8000_port_initialize,
4317};
4318
Ron Mercere4552f52009-06-09 05:39:32 +00004319/* Find the pcie function number for the other NIC
4320 * on this chip. Since both NIC functions share a
4321 * common firmware we have the lowest enabled function
4322 * do any common work. Examples would be resetting
4323 * after a fatal firmware error, or doing a firmware
4324 * coredump.
4325 */
4326static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004327{
Ron Mercere4552f52009-06-09 05:39:32 +00004328 int status = 0;
4329 u32 temp;
4330 u32 nic_func1, nic_func2;
4331
4332 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4333 &temp);
4334 if (status)
4335 return status;
4336
4337 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4338 MPI_TEST_NIC_FUNC_MASK);
4339 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4340 MPI_TEST_NIC_FUNC_MASK);
4341
4342 if (qdev->func == nic_func1)
4343 qdev->alt_func = nic_func2;
4344 else if (qdev->func == nic_func2)
4345 qdev->alt_func = nic_func1;
4346 else
4347 status = -EIO;
4348
4349 return status;
4350}
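/* Illustrative example: if MPI_TEST_FUNC_PORT_CFG reports nic_func1 = 0
 * and nic_func2 = 2 and this device is function 0, alt_func becomes 2;
 * ql_get_board_info() below then assigns the lower-numbered function
 * port 0.
 */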
4351
4352static int ql_get_board_info(struct ql_adapter *qdev)
4353{
4354 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004355 qdev->func =
4356 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004357 if (qdev->func > 3)
4358 return -EIO;
4359
4360 status = ql_get_alt_pcie_func(qdev);
4361 if (status)
4362 return status;
4363
4364 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4365 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004366 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4367 qdev->port_link_up = STS_PL1;
4368 qdev->port_init = STS_PI1;
4369 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4370 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4371 } else {
4372 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4373 qdev->port_link_up = STS_PL0;
4374 qdev->port_init = STS_PI0;
4375 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4376 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4377 }
4378 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004379 qdev->device_id = qdev->pdev->device;
4380 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4381 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004382 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4383 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004384 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004385}
4386
4387static void ql_release_all(struct pci_dev *pdev)
4388{
4389 struct net_device *ndev = pci_get_drvdata(pdev);
4390 struct ql_adapter *qdev = netdev_priv(ndev);
4391
4392 if (qdev->workqueue) {
4393 destroy_workqueue(qdev->workqueue);
4394 qdev->workqueue = NULL;
4395 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004396
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004397 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004398 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004399 if (qdev->doorbell_area)
4400 iounmap(qdev->doorbell_area);
4401 pci_release_regions(pdev);
4402 pci_set_drvdata(pdev, NULL);
4403}
4404
4405static int __devinit ql_init_device(struct pci_dev *pdev,
4406 struct net_device *ndev, int cards_found)
4407{
4408 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004409 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004410
Ron Mercere3324712009-07-02 06:06:13 +00004411 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004412 err = pci_enable_device(pdev);
4413 if (err) {
4414 dev_err(&pdev->dev, "PCI device enable failed.\n");
4415 return err;
4416 }
4417
Ron Mercerebd6e772009-09-29 08:39:25 +00004418 qdev->ndev = ndev;
4419 qdev->pdev = pdev;
4420 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004421
Ron Mercerbc9167f2009-10-10 09:35:04 +00004422 /* Set PCIe read request size */
4423 err = pcie_set_readrq(pdev, 4096);
4424 if (err) {
4425 dev_err(&pdev->dev, "Set readrq failed.\n");
4426 goto err_out;
4427 }
4428
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004429 err = pci_request_regions(pdev, DRV_NAME);
4430 if (err) {
4431 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004432 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004433 }
4434
4435 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004436 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004437 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004438 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004439 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004440 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004441 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004442 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004443 }
4444
4445 if (err) {
4446 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4447 goto err_out;
4448 }
4449
Ron Mercer73475332009-11-06 07:44:58 +00004450 /* Set PCIe reset type for EEH to fundamental. */
4451 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004452 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004453 qdev->reg_base =
4454 ioremap_nocache(pci_resource_start(pdev, 1),
4455 pci_resource_len(pdev, 1));
4456 if (!qdev->reg_base) {
4457 dev_err(&pdev->dev, "Register mapping failed.\n");
4458 err = -ENOMEM;
4459 goto err_out;
4460 }
4461
4462 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4463 qdev->doorbell_area =
4464 ioremap_nocache(pci_resource_start(pdev, 3),
4465 pci_resource_len(pdev, 3));
4466 if (!qdev->doorbell_area) {
4467 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4468 err = -ENOMEM;
4469 goto err_out;
4470 }
4471
Ron Mercere4552f52009-06-09 05:39:32 +00004472 err = ql_get_board_info(qdev);
4473 if (err) {
4474 dev_err(&pdev->dev, "Register access failed.\n");
4475 err = -EIO;
4476 goto err_out;
4477 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004478 qdev->msg_enable = netif_msg_init(debug, default_msg);
4479 spin_lock_init(&qdev->hw_lock);
4480 spin_lock_init(&qdev->stats_lock);
4481
4482 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004483 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004484 if (err) {
4485 dev_err(&pdev->dev, "Invalid FLASH.\n");
4486 goto err_out;
4487 }
4488
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004489 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4490
4491 /* Set up the default ring sizes. */
4492 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4493 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4494
4495 /* Set up the coalescing parameters. */
4496 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4497 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4498 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4499 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4500
4501 /*
4502 * Set up the operating parameters.
4503 */
4504 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004505 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4506 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4507 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4508 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004509 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004510 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004511 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004512
4513 if (!cards_found) {
4514 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4515 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4516 DRV_NAME, DRV_VERSION);
4517 }
4518 return 0;
4519err_out:
4520 ql_release_all(pdev);
4521 pci_disable_device(pdev);
4522 return err;
4523}
4524
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004525static const struct net_device_ops qlge_netdev_ops = {
4526 .ndo_open = qlge_open,
4527 .ndo_stop = qlge_close,
4528 .ndo_start_xmit = qlge_send,
4529 .ndo_change_mtu = qlge_change_mtu,
4530 .ndo_get_stats = qlge_get_stats,
4531 .ndo_set_multicast_list = qlge_set_multicast_list,
4532 .ndo_set_mac_address = qlge_set_mac_address,
4533 .ndo_validate_addr = eth_validate_addr,
4534 .ndo_tx_timeout = qlge_tx_timeout,
Ron Mercer01e6b952009-10-30 12:13:34 +00004535 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4536 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4537 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004538};
4539
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004540static int __devinit qlge_probe(struct pci_dev *pdev,
4541 const struct pci_device_id *pci_entry)
4542{
4543 struct net_device *ndev = NULL;
4544 struct ql_adapter *qdev = NULL;
4545 static int cards_found = 0;
4546 int err = 0;
4547
Ron Mercer1e213302009-03-09 10:59:21 +00004548 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4549 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004550 if (!ndev)
4551 return -ENOMEM;
4552
4553 err = ql_init_device(pdev, ndev, cards_found);
4554 if (err < 0) {
4555 free_netdev(ndev);
4556 return err;
4557 }
4558
4559 qdev = netdev_priv(ndev);
4560 SET_NETDEV_DEV(ndev, &pdev->dev);
4561 ndev->features = (0
4562 | NETIF_F_IP_CSUM
4563 | NETIF_F_SG
4564 | NETIF_F_TSO
4565 | NETIF_F_TSO6
4566 | NETIF_F_TSO_ECN
4567 | NETIF_F_HW_VLAN_TX
4568 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004569 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004570
4571 if (test_bit(QL_DMA64, &qdev->flags))
4572 ndev->features |= NETIF_F_HIGHDMA;
4573
4574 /*
4575 * Set up net_device structure.
4576 */
4577 ndev->tx_queue_len = qdev->tx_ring_size;
4578 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004579
4580 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004581 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004582 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004583
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004584 err = register_netdev(ndev);
4585 if (err) {
4586 dev_err(&pdev->dev, "net device registration failed.\n");
4587 ql_release_all(pdev);
4588 pci_disable_device(pdev);
4589 return err;
4590 }
Ron Mercer6a473302009-07-02 06:06:12 +00004591 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004592 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004593 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004594 cards_found++;
4595 return 0;
4596}
4597
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004598netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4599{
4600 return qlge_send(skb, ndev);
4601}
4602
4603int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4604{
4605 return ql_clean_inbound_rx_ring(rx_ring, budget);
4606}
4607
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004608static void __devexit qlge_remove(struct pci_dev *pdev)
4609{
4610 struct net_device *ndev = pci_get_drvdata(pdev);
4611 unregister_netdev(ndev);
4612 ql_release_all(pdev);
4613 pci_disable_device(pdev);
4614 free_netdev(ndev);
4615}
4616
Ron Mercer6d190c62009-10-28 08:39:20 +00004617/* Clean up resources without touching hardware. */
4618static void ql_eeh_close(struct net_device *ndev)
4619{
4620 int i;
4621 struct ql_adapter *qdev = netdev_priv(ndev);
4622
4623 if (netif_carrier_ok(ndev)) {
4624 netif_carrier_off(ndev);
4625 netif_stop_queue(ndev);
4626 }
4627
4628 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4629 cancel_delayed_work_sync(&qdev->asic_reset_work);
4630 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4631 cancel_delayed_work_sync(&qdev->mpi_work);
4632 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4633 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4634
4635 for (i = 0; i < qdev->rss_ring_count; i++)
4636 netif_napi_del(&qdev->rx_ring[i].napi);
4637
4638 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4639 ql_tx_ring_clean(qdev);
4640 ql_free_rx_buffers(qdev);
4641 ql_release_adapter_resources(qdev);
4642}
4643
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004644/*
4645 * This callback is called by the PCI subsystem whenever
4646 * a PCI bus error is detected.
4647 */
4648static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4649 enum pci_channel_state state)
4650{
4651 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004652
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

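	/* Manually mark the channel healthy again before touching the
	 * device; while error_state is set the device is still treated
	 * as offline.
	 */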
	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

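	/* The slot has been reset; bounce the chip and, if the interface
	 * was up before the error, bring it all the way back up.
	 */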
	if (ql_adapter_reset(qdev))
		QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	} else {
		QPRINTK(qdev, IFUP, ERR,
			"Device was not running prior to EEH.\n");
	}
	netif_device_attach(ndev);
}

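/* EEH callbacks registered with the PCI core via the driver struct below. */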
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

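	/* Arm wake-on-LAN in the chip (per the configured WoL settings)
	 * before the device loses power.
	 */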
	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

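	/* Fully powered again; turn PCI wake events back off for the
	 * running device.
	 */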
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
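	/* Reuse the suspend path to quiesce the chip on shutdown/reboot. */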
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);