/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *             Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
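
/* Example (illustrative only, not driver code): both options above are
 * plain module parameters, so they can be set at load time.  The debug
 * value is a bitwise OR of NETIF_MSG_* flags; e.g. 0x4007 selects
 * NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_HW:
 *
 *	modprobe qlge qlge_irq_type=2 debug=0x4007
 */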

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCoE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
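
/* Typical usage pattern (illustrative sketch, not new driver code):
 * acquire the semaphore for a shared resource, touch the hardware,
 * then release it.  ql_sem_spinlock() polls ql_sem_trylock() up to
 * 30 times at 100us intervals before giving up:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */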

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip.  This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw.  Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
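
/* Illustrative use (a sketch modeled on the driver's rx ring setup
 * path, where a completion queue control block is downloaded):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * CFG_LCQ makes the transfer a load *to* the chip, so the block is
 * DMA-mapped PCI_DMA_TODEVICE; dump operations map the other way.
 */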

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
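
/* Illustrative read of one CAM entry (a sketch, not driver code): a
 * CAM MAC entry spans three 32-bit words (lower and upper address
 * bits plus the output/routing word), and callers such as the
 * register dump code hold the MAC address semaphore around the call:
 *
 *	u32 entry[3];
 *
 *	if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
 *		return -ETIMEDOUT;
 *	status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC,
 *				     index, entry);
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */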

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing.  It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware.  We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
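
/* Illustrative call (a sketch of the pattern used during routing
 * setup): route broadcast frames to the default queue, holding the
 * routing-index semaphore as the initialization path does:
 *
 *	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 *	if (status)
 *		return status;
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 */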

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes.  In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
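
/* Worked example of the irq_cnt accounting above (illustrative): with
 * a single shared vector (legacy/MSI, or MSI-X vector 0), the counter
 * tracks outstanding workers, and only the enable that brings it back
 * to zero re-arms the hardware:
 *
 *	start:          irq_cnt == 0, interrupt enabled
 *	queue worker A: disable -> HW masked, irq_cnt 1
 *	queue worker B: disable -> already masked, irq_cnt 2
 *	A finishes:     enable  -> irq_cnt 1, still masked
 *	B finishes:     enable  -> irq_cnt 0, INTR_EN written, IRQ live
 */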

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
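
/* The checksum convention here (illustrative): the flash image stores
 * a 16-bit checksum word chosen so that the little-endian 16-bit words
 * of the whole parameter block sum to zero mod 0x10000.  For example,
 * if the other words sum to 0xF0A3, the stored checksum word is
 * 0x0F5D, since 0xF0A3 + 0x0F5D wraps to 0 in u16 arithmetic.
 */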

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
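
/* Illustrative use (a sketch; "stats_reg" stands in for one of the
 * XGMAC statistics offsets defined in qlge.h): the counter occupies
 * two consecutive 32-bit registers, low word first, so the 64-bit
 * value is lo | (hi << 32).  The xgmac semaphore is expected to be
 * held around the access:
 *
 *	u64 count;
 *
 *	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask))
 *		return;
 *	ql_read_xgmac_reg64(qdev, stats_reg, &count);
 *	ql_sem_unlock(qdev, qdev->xg_sem_mask);
 */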

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
					rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
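
/* Worked example of the master-page chunking above (illustrative,
 * assuming PAGE_SIZE == 4096, lbq_buf_order == 1 and lbq_buf_size ==
 * 2048): ql_lbq_block_size() is 4096 << 1 == 8192, so one compound
 * page yields four 2KB chunks at offsets 0, 2048, 4096 and 6144.
 * The first three hand-outs bump the page refcount via get_page();
 * the fourth sets last_flag and clears pg_chunk.page, and
 * ql_get_curr_lchunk() later unmaps the whole 8192-byte block when
 * that last chunk is consumed.
 */
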
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be 1
	 * (just the skb->data area).  We pass in the number of
	 * buffers that mapped successfully so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
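
/* Worked example of the OAL bookkeeping above (illustrative): for a
 * frame with frag_cnt == 10, the OAL is tacked on at frag_idx == 6,
 * so its descriptor length is
 *
 *	sizeof(struct tx_buf_desc) * (10 - 6)  ->  4 descriptors,
 *
 * ORed with TX_DESC_C (continuation).  Frags 6..9 then land in
 * oal->seg[0..3], and the final segment gets TX_DESC_E so the chip
 * knows where the list ends.  ql_unmap_send() mirrors this layout:
 * map[0] is skb->data, map[7] is the OAL mapping, and everything
 * else is a page fragment.
 */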
1465
Ron Mercer4f848c02010-01-02 10:37:43 +00001466/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001467static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468 struct rx_ring *rx_ring,
1469 struct ib_mac_iocb_rsp *ib_mac_rsp,
1470 u32 length,
1471 u16 vlan_id)
1472{
1473 struct sk_buff *skb;
1474 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475 struct skb_frag_struct *rx_frag;
1476 int nr_frags;
1477 struct napi_struct *napi = &rx_ring->napi;
1478
1479 napi->dev = qdev->ndev;
1480
1481 skb = napi_get_frags(napi);
1482 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001483 netif_err(qdev, drv, qdev->ndev,
1484 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001485 rx_ring->rx_dropped++;
1486 put_page(lbq_desc->p.pg_chunk.page);
1487 return;
1488 }
1489 prefetch(lbq_desc->p.pg_chunk.va);
1490 rx_frag = skb_shinfo(skb)->frags;
1491 nr_frags = skb_shinfo(skb)->nr_frags;
1492 rx_frag += nr_frags;
1493 rx_frag->page = lbq_desc->p.pg_chunk.page;
1494 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1495 rx_frag->size = length;
1496
1497 skb->len += length;
1498 skb->data_len += length;
1499 skb->truesize += length;
1500 skb_shinfo(skb)->nr_frags++;
1501
1502 rx_ring->rx_packets++;
1503 rx_ring->rx_bytes += length;
1504 skb->ip_summed = CHECKSUM_UNNECESSARY;
1505 skb_record_rx_queue(skb, rx_ring->cq_id);
1506 if (qdev->vlgrp && (vlan_id != 0xffff))
1507 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1508 else
1509 napi_gro_frags(napi);
1510}
1511
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.  Check length, not skb->len,
	 * since nothing has been put in the skb yet at this point.
	 */
	if (length > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset + ETH_HLEN,
				length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

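/* Small frames land in a small-buffer skb that is still DMA-mapped for
 * the ring.  The handler below copies the payload into a fresh skb and
 * leaves the original in place, presumably so the mapped buffer can be
 * recycled to the hardware without a new allocation.
 */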
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

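/* With the usual values (QLGE_SB_PAD = 32 per the comment inside, and
 * NET_IP_ALIGN typically 2), the copy below shifts the payload back by
 * 30 bytes, leaving skb->data 2 bytes past a 4-byte boundary so the IP
 * header is aligned after the 14-byte Ethernet header.
 */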
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

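/* The chip can deposit a frame in several buffer layouts, keyed by the
 * IOCB response flags (summarized from the branches below):
 *
 *	HV+HS		- headers split into a small buffer
 *	DS		- data in a small buffer
 *	DL		- data in a single large-buffer page chunk
 *	none of these	- data in a chain of large buffers, described
 *			  by a scatter list held in a small buffer
 *
 * ql_build_rx_skb() stitches whichever combination arrived into one skb.
 */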
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer.
			 * We copy it to a new skb and let it go.  This can
			 * happen with jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *	   buffer will hold 32 of them. The thing is,
		 *	   we'll use 3 max for our 9000 byte jumbo
		 *	   frames. If the MTU goes up we could
		 *	   eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

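/* Dispatch for the RX paths above, in the order tested below:
 *
 *	HV set			-> header/data split handler
 *	DS set			-> whole frame in a small buffer, copy path
 *	DL set, TCP, csum OK	-> page chunk straight into GRO
 *	DL set, anything else	-> page chunk behind a fresh skb
 *	otherwise		-> multi-buffer frame, split-frame logic
 */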
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk.  Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netif_err(qdev, link, qdev->ndev,
			  "Multiple CAM hits lookup occurred.\n");
		netif_err(qdev, drv, qdev->ndev,
			  "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netif_err(qdev, rx_err, qdev->ndev,
			  "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			  ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	struct tx_ring *tx_ring;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	/* net_rsp is only valid if we processed at least one completion,
	 * so check it before dereferencing it for the tx queue index.
	 */
	if (net_rsp != NULL) {
		tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
		    atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

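/* ql_clean_inbound_rx_ring() honors the NAPI budget: it handles at most
 * 'budget' completions before breaking out, then replenishes the buffer
 * queues and writes the consumer index back, letting the poll routine
 * decide whether to reschedule.
 */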
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

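/* Each MSI-X vector owns one RSS (inbound) ring plus any TX completion
 * rings whose cq_id bit is set in the vector's irq_mask, i.e.
 * ctx->irq_mask & (1 << cq_id).  The poll routine below drains those TX
 * completion rings first, then spends its NAPI budget on the RSS ring.
 */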
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
					rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	if (ql_set_mac_addr_reg
		(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	if (ql_set_mac_addr_reg
		(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netif_err(qdev, intr, qdev->ndev,
			  "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netif_err(qdev, intr, qdev->ndev,
			  "Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

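/* For TSO the TCP checksum field must hold only a pseudo-header seed:
 * ql_tso() below computes it with a zero length argument
 * (~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0)) so the hardware
 * can fold in the real length and payload checksum for each segment it
 * carves out of the super-frame.
 */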
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
			NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

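	/* The wmb() above makes the IOCB contents globally visible
	 * before the doorbell write below publishes the new producer
	 * index; otherwise the chip could fetch a half-written
	 * descriptor.
	 */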
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

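/* The "shadow" areas are DMA-coherent pages the chip updates directly,
 * e.g. the completion-queue producer indices read via ql_read_sh_reg().
 * Polling them from host memory avoids an MMIO read per ring per
 * interrupt.
 */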
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		/* Free the block if it was allocated but misaligned. */
		if (tx_ring->wq_base) {
			pci_free_consistent(qdev->pdev, tx_ring->wq_size,
					    tx_ring->wq_base,
					    tx_ring->wq_base_dma);
			tx_ring->wq_base = NULL;
		}
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;

	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

2864/* Allocate queues and buffers for this completions queue based
2865 * on the values in the parameter structure. */
2866static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2867 struct rx_ring *rx_ring)
2868{
2869
2870 /*
2871 * Allocate the completion queue for this rx_ring.
2872 */
2873 rx_ring->cq_base =
2874 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2875 &rx_ring->cq_base_dma);
2876
2877 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002878 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002879 return -ENOMEM;
2880 }
2881
2882 if (rx_ring->sbq_len) {
2883 /*
2884 * Allocate small buffer queue.
2885 */
2886 rx_ring->sbq_base =
2887 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2888 &rx_ring->sbq_base_dma);
2889
2890 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002891 netif_err(qdev, ifup, qdev->ndev,
2892 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002893 goto err_mem;
2894 }
2895
2896 /*
2897 * Allocate small buffer queue control blocks.
2898 */
2899 rx_ring->sbq =
2900 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2901 GFP_KERNEL);
2902 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002903 netif_err(qdev, ifup, qdev->ndev,
2904 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002905 goto err_mem;
2906 }
2907
Ron Mercer4545a3f2009-02-23 10:42:17 +00002908 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002909 }
2910
2911 if (rx_ring->lbq_len) {
2912 /*
2913 * Allocate large buffer queue.
2914 */
2915 rx_ring->lbq_base =
2916 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2917 &rx_ring->lbq_base_dma);
2918
2919 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002920 netif_err(qdev, ifup, qdev->ndev,
2921 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002922 goto err_mem;
2923 }
2924 /*
2925 * Allocate large buffer queue control blocks.
2926 */
2927 rx_ring->lbq =
2928 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2929 GFP_KERNEL);
2930 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002931 netif_err(qdev, ifup, qdev->ndev,
2932 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002933 goto err_mem;
2934 }
2935
Ron Mercer4545a3f2009-02-23 10:42:17 +00002936 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002937 }
2938
2939 return 0;
2940
2941err_mem:
2942 ql_free_rx_resources(qdev, rx_ring);
2943 return -ENOMEM;
2944}
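/* A note on the pattern above: the completion queue, small buffer
 * queue, and large buffer queue allocations all unwind through the
 * single err_mem label, and ql_free_rx_resources() tolerates a
 * half-built ring by checking each pointer before freeing it. A
 * minimal sketch of the same idiom, using hypothetical resources a
 * and b (names and sizes are illustrative only):
 *
 *	int alloc_two(struct ctx *c)
 *	{
 *		c->a = kmalloc(A_SIZE, GFP_KERNEL);
 *		if (!c->a)
 *			goto err;
 *		c->b = kmalloc(B_SIZE, GFP_KERNEL);
 *		if (!c->b)
 *			goto err;
 *		return 0;
 *	err:
 *		free_two(c);	// frees only the non-NULL members
 *		return -ENOMEM;
 *	}
 */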
2945
2946static void ql_tx_ring_clean(struct ql_adapter *qdev)
2947{
2948 struct tx_ring *tx_ring;
2949 struct tx_ring_desc *tx_ring_desc;
2950 int i, j;
2951
2952 /*
2953 * Loop through all queues and free
2954 * any resources.
2955 */
2956 for (j = 0; j < qdev->tx_ring_count; j++) {
2957 tx_ring = &qdev->tx_ring[j];
2958 for (i = 0; i < tx_ring->wq_len; i++) {
2959 tx_ring_desc = &tx_ring->q[i];
2960 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002961 netif_err(qdev, ifdown, qdev->ndev,
2962 "Freeing lost SKB %p, from queue %d, index %d.\n",
2963 tx_ring_desc->skb, j,
2964 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002965 ql_unmap_send(qdev, tx_ring_desc,
2966 tx_ring_desc->map_cnt);
2967 dev_kfree_skb(tx_ring_desc->skb);
2968 tx_ring_desc->skb = NULL;
2969 }
2970 }
2971 }
2972}
2973
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002974static void ql_free_mem_resources(struct ql_adapter *qdev)
2975{
2976 int i;
2977
2978 for (i = 0; i < qdev->tx_ring_count; i++)
2979 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2980 for (i = 0; i < qdev->rx_ring_count; i++)
2981 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2982 ql_free_shadow_space(qdev);
2983}
2984
2985static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2986{
2987 int i;
2988
2989 /* Allocate space for our shadow registers and such. */
2990 if (ql_alloc_shadow_space(qdev))
2991 return -ENOMEM;
2992
2993 for (i = 0; i < qdev->rx_ring_count; i++) {
2994 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002995 netif_err(qdev, ifup, qdev->ndev,
2996 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002997 goto err_mem;
2998 }
2999 }
3000 /* Allocate tx queue resources */
3001 for (i = 0; i < qdev->tx_ring_count; i++) {
3002 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003003 netif_err(qdev, ifup, qdev->ndev,
3004 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003005 goto err_mem;
3006 }
3007 }
3008 return 0;
3009
3010err_mem:
3011 ql_free_mem_resources(qdev);
3012 return -ENOMEM;
3013}
3014
3015/* Set up the rx ring control block and pass it to the chip.
3016 * The control block is defined as
3017 * "Completion Queue Initialization Control Block", or cqicb.
3018 */
3019static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3020{
3021 struct cqicb *cqicb = &rx_ring->cqicb;
3022 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003023 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003024 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003025 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003026 void __iomem *doorbell_area =
3027 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3028 int err = 0;
3029 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003030 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003031 __le64 *base_indirect_ptr;
3032 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003033
3034 /* Set up the shadow registers for this ring. */
3035 rx_ring->prod_idx_sh_reg = shadow_reg;
3036 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003037 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003038 shadow_reg += sizeof(u64);
3039 shadow_reg_dma += sizeof(u64);
3040 rx_ring->lbq_base_indirect = shadow_reg;
3041 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003042 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3043 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003044 rx_ring->sbq_base_indirect = shadow_reg;
3045 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3046
3047 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003048 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003049 rx_ring->cnsmr_idx = 0;
3050 rx_ring->curr_entry = rx_ring->cq_base;
3051
3052 /* PCI doorbell mem area + 0x04 for valid register */
3053 rx_ring->valid_db_reg = doorbell_area + 0x04;
3054
3055 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003056 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003057
3058 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003059 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003060
3061 memset((void *)cqicb, 0, sizeof(struct cqicb));
3062 cqicb->msix_vect = rx_ring->irq;
3063
Ron Mercer459caf52009-01-04 17:08:11 -08003064 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3065 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003066
Ron Mercer97345522009-01-09 11:31:50 +00003067 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003068
Ron Mercer97345522009-01-09 11:31:50 +00003069 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003070
3071 /*
3072 * Set up the control block load flags.
3073 */
3074 cqicb->flags = FLAGS_LC | /* Load queue base address */
3075 FLAGS_LV | /* Load MSI-X vector */
3076 FLAGS_LI; /* Load irq delay values */
3077 if (rx_ring->lbq_len) {
3078 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003079 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003080 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3081 page_entries = 0;
3082 do {
3083 *base_indirect_ptr = cpu_to_le64(tmp);
3084 tmp += DB_PAGE_SIZE;
3085 base_indirect_ptr++;
3086 page_entries++;
3087 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003088 cqicb->lbq_addr =
3089 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003090 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3091 (u16) rx_ring->lbq_buf_size;
3092 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3093 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3094 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003095 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003096 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003097 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003098 rx_ring->lbq_clean_idx = 0;
3099 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003100 }
3101 if (rx_ring->sbq_len) {
3102 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003103 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003104 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3105 page_entries = 0;
3106 do {
3107 *base_indirect_ptr = cpu_to_le64(tmp);
3108 tmp += DB_PAGE_SIZE;
3109 base_indirect_ptr++;
3110 page_entries++;
3111 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003112 cqicb->sbq_addr =
3113 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003114 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003115 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003116 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3117 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003118 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003119 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003120 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003121 rx_ring->sbq_clean_idx = 0;
3122 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003123 }
3124 switch (rx_ring->type) {
3125 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003126 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3127 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3128 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003129 case RX_Q:
3130 /* Inbound completion handling rx_rings run in
3131 * separate NAPI contexts.
3132 */
3133 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3134 64);
3135 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3136 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3137 break;
3138 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003139 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3140 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003141 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003142 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3143 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003144 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3145 CFG_LCQ, rx_ring->cq_id);
3146 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003147 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003148 return err;
3149 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003150 return err;
3151}
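/* A note on the "(len == 65536) ? 0 : (u16)len" conversions above:
 * the length and buffer-size fields of the CQICB are 16 bits wide
 * (cpu_to_le16), so the maximum queue length of 65536 wraps to 0;
 * presumably the hardware reads the 0 encoding back as 65536. An
 * illustrative helper (a sketch, not part of the driver):
 *
 *	static inline u16 q_len_to_hw(u32 len)
 *	{
 *		return (len == 65536) ? 0 : (u16)len;
 *	}
 */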
3152
3153static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3154{
3155 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3156 void __iomem *doorbell_area =
3157 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3158 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3159 (tx_ring->wq_id * sizeof(u64));
3160 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3161 (tx_ring->wq_id * sizeof(u64));
3162 int err = 0;
3163
3164 /*
3165 * Assign doorbell registers for this tx_ring.
3166 */
3167 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003168 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003169 tx_ring->prod_idx = 0;
3170 /* TX PCI doorbell mem area + 0x04 */
3171 tx_ring->valid_db_reg = doorbell_area + 0x04;
3172
3173 /*
3174 * Assign shadow registers for this tx_ring.
3175 */
3176 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3177 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3178
3179 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3180 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3181 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3182 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3183 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003184 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003185
Ron Mercer97345522009-01-09 11:31:50 +00003186 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003187
3188 ql_init_tx_ring(qdev, tx_ring);
3189
Ron Mercere3324712009-07-02 06:06:13 +00003190 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003191 (u16) tx_ring->wq_id);
3192 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003193 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003194 return err;
3195 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003196 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3197 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198 return err;
3199}
3200
3201static void ql_disable_msix(struct ql_adapter *qdev)
3202{
3203 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3204 pci_disable_msix(qdev->pdev);
3205 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3206 kfree(qdev->msi_x_entry);
3207 qdev->msi_x_entry = NULL;
3208 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3209 pci_disable_msi(qdev->pdev);
3210 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3211 }
3212}
3213
Ron Mercera4ab6132009-08-27 11:02:10 +00003214/* We start by trying to get the number of vectors
3215 * stored in qdev->intr_count. If we don't get that
 3216 * many, we reduce the count and try again.
3217 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003218static void ql_enable_msix(struct ql_adapter *qdev)
3219{
Ron Mercera4ab6132009-08-27 11:02:10 +00003220 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003221
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003222 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003223 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003224 /* Try to alloc space for the msix struct,
3225 * if it fails then go to MSI/legacy.
3226 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003227 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003228 sizeof(struct msix_entry),
3229 GFP_KERNEL);
3230 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003231 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003232 goto msi;
3233 }
3234
Ron Mercera4ab6132009-08-27 11:02:10 +00003235 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003236 qdev->msi_x_entry[i].entry = i;
3237
Ron Mercera4ab6132009-08-27 11:02:10 +00003238 /* Loop to get our vectors. We start with
3239 * what we want and settle for what we get.
3240 */
3241 do {
3242 err = pci_enable_msix(qdev->pdev,
3243 qdev->msi_x_entry, qdev->intr_count);
3244 if (err > 0)
3245 qdev->intr_count = err;
3246 } while (err > 0);
3247
3248 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003249 kfree(qdev->msi_x_entry);
3250 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003251 netif_warn(qdev, ifup, qdev->ndev,
3252 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003253 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003254 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003255 } else if (err == 0) {
3256 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003257 netif_info(qdev, ifup, qdev->ndev,
3258 "MSI-X Enabled, got %d vectors.\n",
3259 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003260 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003261 }
3262 }
3263msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003264 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003265 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003266 if (!pci_enable_msi(qdev->pdev)) {
3267 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003268 netif_info(qdev, ifup, qdev->ndev,
3269 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003270 return;
3271 }
3272 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003273 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003274 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3275 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003276}
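/* The vector negotiation above relies on the return convention of
 * pci_enable_msix(): 0 means all requested vectors were granted, a
 * positive return is the number of vectors the system could provide
 * (so we retry with that count), and a negative return is a hard
 * error. Condensed sketch of the same idea (variable names are
 * illustrative):
 *
 *	int want = qdev->intr_count;
 *	int err;
 *
 *	do {
 *		err = pci_enable_msix(pdev, entries, want);
 *		if (err > 0)
 *			want = err;	// settle for fewer vectors
 *	} while (err > 0);
 *	// err == 0: success with 'want' vectors; err < 0: fall back to MSI
 */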
3277
Ron Mercer39aa8162009-08-27 11:02:11 +00003278/* Each vector services 1 RSS ring and 1 or more
3279 * TX completion rings. This function loops through
3280 * the TX completion rings and assigns the vector that
3281 * will service it. An example would be if there are
3282 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3283 * This would mean that vector 0 would service RSS ring 0
 3284 * and TX completion rings 0,1,2 and 3. Vector 1 would
3285 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3286 */
3287static void ql_set_tx_vect(struct ql_adapter *qdev)
3288{
3289 int i, j, vect;
3290 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3291
3292 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3293		/* Assign irq vectors to the TX completion rx_rings. */
3294 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3295 i < qdev->rx_ring_count; i++) {
3296 if (j == tx_rings_per_vector) {
3297 vect++;
3298 j = 0;
3299 }
3300 qdev->rx_ring[i].irq = vect;
3301 j++;
3302 }
3303 } else {
3304 /* For single vector all rings have an irq
3305 * of zero.
3306 */
3307 for (i = 0; i < qdev->rx_ring_count; i++)
3308 qdev->rx_ring[i].irq = 0;
3309 }
3310}
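/* Worked example of the mapping above, matching the comment before
 * ql_set_tx_vect(): with intr_count = 2 and tx_ring_count = 8,
 * tx_rings_per_vector = 4, so TX completion ring t is serviced by
 * vector t / 4. A sketch (the helper name is illustrative only):
 *
 *	static int vect_for_tx_ring(int t, int tx_rings_per_vector)
 *	{
 *		return t / tx_rings_per_vector;
 *	}
 *
 * giving vector 0 for TX rings 0-3 and vector 1 for TX rings 4-7.
 */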
3311
3312/* Set the interrupt mask for this vector. Each vector
3313 * will service 1 RSS ring and 1 or more TX completion
3314 * rings. This function sets up a bit mask per vector
3315 * that indicates which rings it services.
3316 */
3317static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3318{
3319 int j, vect = ctx->intr;
3320 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3321
3322 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3323 /* Add the RSS ring serviced by this vector
3324 * to the mask.
3325 */
3326 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3327 /* Add the TX ring(s) serviced by this vector
3328 * to the mask. */
3329 for (j = 0; j < tx_rings_per_vector; j++) {
3330 ctx->irq_mask |=
3331 (1 << qdev->rx_ring[qdev->rss_ring_count +
3332 (vect * tx_rings_per_vector) + j].cq_id);
3333 }
3334 } else {
3335 /* For single vector we just shift each queue's
3336 * ID into the mask.
3337 */
3338 for (j = 0; j < qdev->rx_ring_count; j++)
3339 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3340 }
3341}
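/* For the same 2-vector/8-TX-ring example, vector 0's irq_mask ORs
 * together the cq_id bits of RSS ring 0 and TX completion rings 0-3.
 * Sketch of the computation for vect == 0 (assuming the cq_id layout
 * established by ql_configure_rings()):
 *
 *	u32 mask = 1 << rx_ring[0].cq_id;		// the RSS ring
 *	for (j = 0; j < tx_rings_per_vector; j++)	// its TX rings
 *		mask |= 1 << rx_ring[rss_ring_count + j].cq_id;
 */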
3342
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003343/*
3344 * Here we build the intr_context structures based on
3345 * our rx_ring count and intr vector count.
3346 * The intr_context structure is used to hook each vector
3347 * to possibly different handlers.
3348 */
3349static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3350{
3351 int i = 0;
3352 struct intr_context *intr_context = &qdev->intr_context[0];
3353
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003354 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3355		/* Each rx_ring has its
3356 * own intr_context since we have separate
3357 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003358 */
3359 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3360 qdev->rx_ring[i].irq = i;
3361 intr_context->intr = i;
3362 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003363 /* Set up this vector's bit-mask that indicates
3364 * which queues it services.
3365 */
3366 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003367 /*
 3368			 * We set up each vector's enable/disable/read bits so
 3369			 * there are no bit/mask calculations in the critical path.
3370 */
3371 intr_context->intr_en_mask =
3372 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3373 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3374 | i;
3375 intr_context->intr_dis_mask =
3376 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3377 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3378 INTR_EN_IHD | i;
3379 intr_context->intr_read_mask =
3380 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3382 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003383 if (i == 0) {
3384 /* The first vector/queue handles
3385 * broadcast/multicast, fatal errors,
3386 * and firmware events. This in addition
3387 * to normal inbound NAPI processing.
3388 */
3389 intr_context->handler = qlge_isr;
3390 sprintf(intr_context->name, "%s-rx-%d",
3391 qdev->ndev->name, i);
3392 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003393 /*
3394 * Inbound queues handle unicast frames only.
3395 */
3396 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003397 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003398 qdev->ndev->name, i);
3399 }
3400 }
3401 } else {
3402 /*
3403 * All rx_rings use the same intr_context since
3404 * there is only one vector.
3405 */
3406 intr_context->intr = 0;
3407 intr_context->qdev = qdev;
3408 /*
 3409		 * We set up each vector's enable/disable/read bits so
 3410		 * there are no bit/mask calculations in the critical path.
3411 */
3412 intr_context->intr_en_mask =
3413 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3414 intr_context->intr_dis_mask =
3415 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3416 INTR_EN_TYPE_DISABLE;
3417 intr_context->intr_read_mask =
3418 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3419 /*
3420 * Single interrupt means one handler for all rings.
3421 */
3422 intr_context->handler = qlge_isr;
3423 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003424 /* Set up this vector's bit-mask that indicates
3425 * which queues it services. In this case there is
3426 * a single vector so it will service all RSS and
3427 * TX completion rings.
3428 */
3429 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003430 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003431 /* Tell the TX completion rings which MSIx vector
3432 * they will be using.
3433 */
3434 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003435}
3436
3437static void ql_free_irq(struct ql_adapter *qdev)
3438{
3439 int i;
3440 struct intr_context *intr_context = &qdev->intr_context[0];
3441
3442 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3443 if (intr_context->hooked) {
3444 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3445 free_irq(qdev->msi_x_entry[i].vector,
3446 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003447 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3448 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003449 } else {
3450 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003451 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3452 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003453 }
3454 }
3455 }
3456 ql_disable_msix(qdev);
3457}
3458
3459static int ql_request_irq(struct ql_adapter *qdev)
3460{
3461 int i;
3462 int status = 0;
3463 struct pci_dev *pdev = qdev->pdev;
3464 struct intr_context *intr_context = &qdev->intr_context[0];
3465
3466 ql_resolve_queues_to_irqs(qdev);
3467
3468 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3469 atomic_set(&intr_context->irq_cnt, 0);
3470 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3471 status = request_irq(qdev->msi_x_entry[i].vector,
3472 intr_context->handler,
3473 0,
3474 intr_context->name,
3475 &qdev->rx_ring[i]);
3476 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003477 netif_err(qdev, ifup, qdev->ndev,
3478 "Failed request for MSIX interrupt %d.\n",
3479 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003480 goto err_irq;
3481 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003482 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3483 "Hooked intr %d, queue type %s, with name %s.\n",
3484 i,
3485 qdev->rx_ring[i].type == DEFAULT_Q ?
3486 "DEFAULT_Q" :
3487 qdev->rx_ring[i].type == TX_Q ?
3488 "TX_Q" :
3489 qdev->rx_ring[i].type == RX_Q ?
3490 "RX_Q" : "",
3491 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003492 }
3493 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003494 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3495 "trying msi or legacy interrupts.\n");
3496 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3497 "%s: irq = %d.\n", __func__, pdev->irq);
3498 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3499 "%s: context->name = %s.\n", __func__,
3500 intr_context->name);
3501 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3502 "%s: dev_id = 0x%p.\n", __func__,
3503 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003504 status =
3505 request_irq(pdev->irq, qlge_isr,
3506 test_bit(QL_MSI_ENABLED,
3507 &qdev->
3508 flags) ? 0 : IRQF_SHARED,
3509 intr_context->name, &qdev->rx_ring[0]);
3510 if (status)
3511 goto err_irq;
3512
Joe Perchesae9540f72010-02-09 11:49:52 +00003513 netif_err(qdev, ifup, qdev->ndev,
3514 "Hooked intr %d, queue type %s, with name %s.\n",
3515 i,
3516 qdev->rx_ring[0].type == DEFAULT_Q ?
3517 "DEFAULT_Q" :
3518 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3519 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3520 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003521 }
3522 intr_context->hooked = 1;
3523 }
3524 return status;
3525err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003526	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003527 ql_free_irq(qdev);
3528 return status;
3529}
3530
3531static int ql_start_rss(struct ql_adapter *qdev)
3532{
Ron Mercer541ae282009-10-08 09:54:37 +00003533 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3534 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3535 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3536 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3537 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3538 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003539 struct ricb *ricb = &qdev->ricb;
3540 int status = 0;
3541 int i;
3542 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3543
Ron Mercere3324712009-07-02 06:06:13 +00003544 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003545
Ron Mercerb2014ff2009-08-27 11:02:09 +00003546 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003547 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003548 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3549 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003550
3551 /*
3552 * Fill out the Indirection Table.
3553 */
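	/* Note: the AND below implements "i % rss_ring_count" only
	 * when rss_ring_count is a power of two. */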
Ron Mercer541ae282009-10-08 09:54:37 +00003554 for (i = 0; i < 1024; i++)
3555 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003556
Ron Mercer541ae282009-10-08 09:54:37 +00003557 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3558 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559
Joe Perchesae9540f72010-02-09 11:49:52 +00003560 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003561
Ron Mercere3324712009-07-02 06:06:13 +00003562 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003564 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565 return status;
3566 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003567 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3568 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003569 return status;
3570}
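/* Conceptually, once the RICB is loaded the chip selects a completion
 * queue per received flow roughly as follows (a sketch of the lookup
 * implied by the 1024-entry table and the 0x3ff mask programmed
 * above; the hash itself is assumed to be a Toeplitz-style hash
 * computed by hardware from the 40-byte key):
 *
 *	u32 hash = rss_hash(pkt, hash_key);	// computed by the chip
 *	u8 cq = hash_cq_id[hash & 0x3ff];	// indirection table
 *
 * so all packets of a given TCP/UDP flow land on the same RSS ring.
 */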
3571
Ron Mercera5f59dc2009-07-02 06:06:07 +00003572static int ql_clear_routing_entries(struct ql_adapter *qdev)
3573{
3574 int i, status = 0;
3575
3576 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3577 if (status)
3578 return status;
3579 /* Clear all the entries in the routing table. */
3580 for (i = 0; i < 16; i++) {
3581 status = ql_set_routing_reg(qdev, i, 0, 0);
3582 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003583 netif_err(qdev, ifup, qdev->ndev,
3584 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003585 break;
3586 }
3587 }
3588 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3589 return status;
3590}
3591
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003592/* Initialize the frame-to-queue routing. */
3593static int ql_route_initialize(struct ql_adapter *qdev)
3594{
3595 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003596
3597 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003598 status = ql_clear_routing_entries(qdev);
3599 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003600 return status;
3601
3602 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3603 if (status)
3604 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003605
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003606 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3607 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003608 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003609 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003610 "Failed to init routing register "
3611 "for IP CSUM error packets.\n");
3612 goto exit;
3613 }
3614 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3615 RT_IDX_TU_CSUM_ERR, 1);
3616 if (status) {
3617 netif_err(qdev, ifup, qdev->ndev,
3618 "Failed to init routing register "
3619 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003620 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003621 }
3622 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3623 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003624 netif_err(qdev, ifup, qdev->ndev,
3625 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003626 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003627 }
3628 /* If we have more than one inbound queue, then turn on RSS in the
3629 * routing block.
3630 */
3631 if (qdev->rss_ring_count > 1) {
3632 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3633 RT_IDX_RSS_MATCH, 1);
3634 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003635 netif_err(qdev, ifup, qdev->ndev,
3636 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003637 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003638 }
3639 }
3640
3641 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3642 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003643 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003644 netif_err(qdev, ifup, qdev->ndev,
3645 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003646exit:
3647 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648 return status;
3649}
3650
Ron Mercer2ee1e272009-03-03 12:10:33 +00003651int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003652{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003653 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003654
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003655	/* Check if the link is up and use that to
 3656	 * determine whether we are setting or clearing
3657 * the MAC address in the CAM.
3658 */
3659 set = ql_read32(qdev, STS);
3660 set &= qdev->port_link_up;
3661 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003662 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003663 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003664 return status;
3665 }
3666
3667 status = ql_route_initialize(qdev);
3668 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003669 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003670
3671 return status;
3672}
3673
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003674static int ql_adapter_initialize(struct ql_adapter *qdev)
3675{
3676 u32 value, mask;
3677 int i;
3678 int status = 0;
3679
3680 /*
3681 * Set up the System register to halt on errors.
3682 */
3683 value = SYS_EFE | SYS_FAE;
3684 mask = value << 16;
3685 ql_write32(qdev, SYS, mask | value);
3686
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003687 /* Set the default queue, and VLAN behavior. */
3688 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3689 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003690 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3691
3692 /* Set the MPI interrupt to enabled. */
3693 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3694
3695 /* Enable the function, set pagesize, enable error checking. */
3696 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003697 FSC_EC | FSC_VM_PAGE_4K;
3698 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003699
3700 /* Set/clear header splitting. */
3701 mask = FSC_VM_PAGESIZE_MASK |
3702 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3703 ql_write32(qdev, FSC, mask | value);
3704
Ron Mercer572c5262010-01-02 10:37:42 +00003705 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003706
Ron Mercera3b71932009-10-08 09:54:38 +00003707 /* Set RX packet routing to use port/pci function on which the
3708 * packet arrived on in addition to usual frame routing.
3709 * This is helpful on bonding where both interfaces can have
3710 * the same MAC address.
3711 */
3712 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003713 /* Reroute all packets to our Interface.
3714 * They may have been routed to MPI firmware
3715 * due to WOL.
3716 */
3717 value = ql_read32(qdev, MGMT_RCV_CFG);
3718 value &= ~MGMT_RCV_CFG_RM;
3719 mask = 0xffff0000;
3720
3721 /* Sticky reg needs clearing due to WOL. */
3722 ql_write32(qdev, MGMT_RCV_CFG, mask);
3723 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3724
 3725	/* Default WOL is enabled on Mezz cards */
3726 if (qdev->pdev->subsystem_device == 0x0068 ||
3727 qdev->pdev->subsystem_device == 0x0180)
3728 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003729
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003730 /* Start up the rx queues. */
3731 for (i = 0; i < qdev->rx_ring_count; i++) {
3732 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3733 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003734 netif_err(qdev, ifup, qdev->ndev,
3735 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003736 return status;
3737 }
3738 }
3739
3740 /* If there is more than one inbound completion queue
3741 * then download a RICB to configure RSS.
3742 */
3743 if (qdev->rss_ring_count > 1) {
3744 status = ql_start_rss(qdev);
3745 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003746 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003747 return status;
3748 }
3749 }
3750
3751 /* Start up the tx queues. */
3752 for (i = 0; i < qdev->tx_ring_count; i++) {
3753 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3754 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003755 netif_err(qdev, ifup, qdev->ndev,
3756 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003757 return status;
3758 }
3759 }
3760
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003761 /* Initialize the port and set the max framesize. */
3762 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003763 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003764 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003765
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003766 /* Set up the MAC address and frame routing filter. */
3767 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003768 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003769 netif_err(qdev, ifup, qdev->ndev,
3770 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003771 return status;
3772 }
3773
3774 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003775 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003776 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3777 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003778 napi_enable(&qdev->rx_ring[i].napi);
3779 }
3780
3781 return status;
3782}
3783
3784/* Issue soft reset to chip. */
3785static int ql_adapter_reset(struct ql_adapter *qdev)
3786{
3787 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003788 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003789 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003790
Ron Mercera5f59dc2009-07-02 06:06:07 +00003791 /* Clear all the entries in the routing table. */
3792 status = ql_clear_routing_entries(qdev);
3793 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003794 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003795 return status;
3796 }
3797
3798 end_jiffies = jiffies +
3799 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003800
3801 /* Stop management traffic. */
3802 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3803
3804 /* Wait for the NIC and MGMNT FIFOs to empty. */
3805 ql_wait_fifo_empty(qdev);
3806
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003807 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003808
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003809 do {
3810 value = ql_read32(qdev, RST_FO);
3811 if ((value & RST_FO_FR) == 0)
3812 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003813 cpu_relax();
3814 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003815
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003816 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003817		netif_err(qdev, ifdown, qdev->ndev,
 3818			  "Timed out (ETIMEDOUT) resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003819 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003820 }
3821
Ron Mercer84087f42009-10-08 09:54:41 +00003822 /* Resume management traffic. */
3823 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003824 return status;
3825}
3826
3827static void ql_display_dev_info(struct net_device *ndev)
3828{
3829 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3830
Joe Perchesae9540f72010-02-09 11:49:52 +00003831 netif_info(qdev, probe, qdev->ndev,
3832 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3833 "XG Roll = %d, XG Rev = %d.\n",
3834 qdev->func,
3835 qdev->port,
3836 qdev->chip_rev_id & 0x0000000f,
3837 qdev->chip_rev_id >> 4 & 0x0000000f,
3838 qdev->chip_rev_id >> 8 & 0x0000000f,
3839 qdev->chip_rev_id >> 12 & 0x0000000f);
3840 netif_info(qdev, probe, qdev->ndev,
3841 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003842}
3843
Ron Mercerbc083ce2009-10-21 11:07:40 +00003844int ql_wol(struct ql_adapter *qdev)
3845{
3846 int status = 0;
3847 u32 wol = MB_WOL_DISABLE;
3848
3849 /* The CAM is still intact after a reset, but if we
3850 * are doing WOL, then we may need to program the
3851 * routing regs. We would also need to issue the mailbox
3852 * commands to instruct the MPI what to do per the ethtool
3853 * settings.
3854 */
3855
3856 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3857 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003858		netif_err(qdev, ifdown, qdev->ndev,
 3859			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3860 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003861 return -EINVAL;
3862 }
3863
3864 if (qdev->wol & WAKE_MAGIC) {
3865 status = ql_mb_wol_set_magic(qdev, 1);
3866 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003867 netif_err(qdev, ifdown, qdev->ndev,
3868 "Failed to set magic packet on %s.\n",
3869 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003870 return status;
3871 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003872 netif_info(qdev, drv, qdev->ndev,
3873 "Enabled magic packet successfully on %s.\n",
3874 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003875
3876 wol |= MB_WOL_MAGIC_PKT;
3877 }
3878
3879 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003880 wol |= MB_WOL_MODE_ON;
3881 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003882 netif_err(qdev, drv, qdev->ndev,
3883 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003884 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003885 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003886 }
3887
3888 return status;
3889}
3890
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003891static int ql_adapter_down(struct ql_adapter *qdev)
3892{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003893 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003894
Ron Mercer6a473302009-07-02 06:06:12 +00003895 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003896
Ron Mercer6497b602009-02-12 16:37:13 -08003897 /* Don't kill the reset worker thread if we
3898 * are in the process of recovery.
3899 */
3900 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3901 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003902 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3903 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003904 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003905 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003906 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003907
Ron Mercer39aa8162009-08-27 11:02:11 +00003908 for (i = 0; i < qdev->rss_ring_count; i++)
3909 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003910
3911 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3912
3913 ql_disable_interrupts(qdev);
3914
3915 ql_tx_ring_clean(qdev);
3916
Ron Mercer6b318cb2009-03-09 10:59:26 +00003917	/* Call netif_napi_del() from a common point.
3918 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003919 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003920 netif_napi_del(&qdev->rx_ring[i].napi);
3921
Ron Mercer4545a3f2009-02-23 10:42:17 +00003922 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003923
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003924 status = ql_adapter_reset(qdev);
3925 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003926 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3927 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003928 return status;
3929}
3930
3931static int ql_adapter_up(struct ql_adapter *qdev)
3932{
3933 int err = 0;
3934
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003935 err = ql_adapter_initialize(qdev);
3936 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003937 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003938 goto err_init;
3939 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003940 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003941 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003942 /* If the port is initialized and the
 3943	 * link is up, then turn on the carrier.
3944 */
3945 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3946 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003947 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003948 /* Restore rx mode. */
3949 clear_bit(QL_ALLMULTI, &qdev->flags);
3950 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3951 qlge_set_multicast_list(qdev->ndev);
3952
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003953 ql_enable_interrupts(qdev);
3954 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003955 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003956
3957 return 0;
3958err_init:
3959 ql_adapter_reset(qdev);
3960 return err;
3961}
3962
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003963static void ql_release_adapter_resources(struct ql_adapter *qdev)
3964{
3965 ql_free_mem_resources(qdev);
3966 ql_free_irq(qdev);
3967}
3968
3969static int ql_get_adapter_resources(struct ql_adapter *qdev)
3970{
3971 int status = 0;
3972
3973 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003974 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003975 return -ENOMEM;
3976 }
3977 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003978 return status;
3979}
3980
3981static int qlge_close(struct net_device *ndev)
3982{
3983 struct ql_adapter *qdev = netdev_priv(ndev);
3984
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003985 /* If we hit pci_channel_io_perm_failure
3986 * failure condition, then we already
3987 * brought the adapter down.
3988 */
3989 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003990 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003991 clear_bit(QL_EEH_FATAL, &qdev->flags);
3992 return 0;
3993 }
3994
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003995 /*
3996 * Wait for device to recover from a reset.
3997 * (Rarely happens, but possible.)
3998 */
3999 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4000 msleep(1);
4001 ql_adapter_down(qdev);
4002 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004003 return 0;
4004}
4005
4006static int ql_configure_rings(struct ql_adapter *qdev)
4007{
4008 int i;
4009 struct rx_ring *rx_ring;
4010 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004011 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004012 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4013 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4014
4015 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004016
Ron Mercera4ab6132009-08-27 11:02:10 +00004017 /* In a perfect world we have one RSS ring for each CPU
 4018	 * and each has its own vector. To do that we ask for
4019 * cpu_cnt vectors. ql_enable_msix() will adjust the
4020 * vector count to what we actually get. We then
4021 * allocate an RSS ring for each.
4022 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004023 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004024 qdev->intr_count = cpu_cnt;
4025 ql_enable_msix(qdev);
4026 /* Adjust the RSS ring count to the actual vector count. */
4027 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004028 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004029 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004030
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004031 for (i = 0; i < qdev->tx_ring_count; i++) {
4032 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004033 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004034 tx_ring->qdev = qdev;
4035 tx_ring->wq_id = i;
4036 tx_ring->wq_len = qdev->tx_ring_size;
4037 tx_ring->wq_size =
4038 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4039
4040 /*
 4041		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004042 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004043 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004044 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004045 }
4046
4047 for (i = 0; i < qdev->rx_ring_count; i++) {
4048 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004049 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004050 rx_ring->qdev = qdev;
4051 rx_ring->cq_id = i;
4052 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004053 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004054 /*
4055 * Inbound (RSS) queues.
4056 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004057 rx_ring->cq_len = qdev->rx_ring_size;
4058 rx_ring->cq_size =
4059 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4060 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4061 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004062 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004063 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004064 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4065 "lbq_buf_size %d, order = %d\n",
4066 rx_ring->lbq_buf_size,
4067 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004068 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4069 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004070 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004071 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004072 rx_ring->type = RX_Q;
4073 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004074 /*
4075 * Outbound queue handles outbound completions only.
4076 */
4077 /* outbound cq is same size as tx_ring it services. */
4078 rx_ring->cq_len = qdev->tx_ring_size;
4079 rx_ring->cq_size =
4080 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4081 rx_ring->lbq_len = 0;
4082 rx_ring->lbq_size = 0;
4083 rx_ring->lbq_buf_size = 0;
4084 rx_ring->sbq_len = 0;
4085 rx_ring->sbq_size = 0;
4086 rx_ring->sbq_buf_size = 0;
4087 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004088 }
4089 }
4090 return 0;
4091}
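/* The resulting rx_ring[] layout (a summary of the indexing used
 * throughout the driver):
 *
 *	index:	0 .. rss_ring_count-1	rss_ring_count .. rx_ring_count-1
 *	type:	RX_Q (inbound RSS)	TX_Q (outbound completions)
 *	cq_id:	i			rss_ring_count + tx ring index
 *
 * i.e. the inbound RSS queues come first and each TX ring's
 * completion queue follows, which is why tx_ring->cq_id above is
 * qdev->rss_ring_count + i.
 */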
4092
4093static int qlge_open(struct net_device *ndev)
4094{
4095 int err = 0;
4096 struct ql_adapter *qdev = netdev_priv(ndev);
4097
Ron Mercer74e12432009-11-11 12:54:04 +00004098 err = ql_adapter_reset(qdev);
4099 if (err)
4100 return err;
4101
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004102 err = ql_configure_rings(qdev);
4103 if (err)
4104 return err;
4105
4106 err = ql_get_adapter_resources(qdev);
4107 if (err)
4108 goto error_up;
4109
4110 err = ql_adapter_up(qdev);
4111 if (err)
4112 goto error_up;
4113
4114 return err;
4115
4116error_up:
4117 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004118 return err;
4119}
4120
Ron Mercer7c734352009-10-19 03:32:19 +00004121static int ql_change_rx_buffers(struct ql_adapter *qdev)
4122{
4123 struct rx_ring *rx_ring;
4124 int i, status;
4125 u32 lbq_buf_len;
4126
 4127	/* Wait for an outstanding reset to complete. */
4128 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4129 int i = 3;
4130 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004131 netif_err(qdev, ifup, qdev->ndev,
4132 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004133 ssleep(1);
4134 }
4135
4136 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004137 netif_err(qdev, ifup, qdev->ndev,
4138 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004139 return -ETIMEDOUT;
4140 }
4141 }
4142
4143 status = ql_adapter_down(qdev);
4144 if (status)
4145 goto error;
4146
4147 /* Get the new rx buffer size. */
4148 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4149 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4150 qdev->lbq_buf_order = get_order(lbq_buf_len);
4151
4152 for (i = 0; i < qdev->rss_ring_count; i++) {
4153 rx_ring = &qdev->rx_ring[i];
4154 /* Set the new size. */
4155 rx_ring->lbq_buf_size = lbq_buf_len;
4156 }
4157
4158 status = ql_adapter_up(qdev);
4159 if (status)
4160 goto error;
4161
4162 return status;
4163error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004164 netif_alert(qdev, ifup, qdev->ndev,
4165 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004166 set_bit(QL_ADAPTER_UP, &qdev->flags);
4167 dev_close(qdev->ndev);
4168 return status;
4169}
4170
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004171static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4172{
4173 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004174 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004175
4176 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004177 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004178 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004179 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004180 } else
4181 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004182
4183 queue_delayed_work(qdev->workqueue,
4184 &qdev->mpi_port_cfg_work, 3*HZ);
4185
Breno Leitao746079d2010-02-04 10:11:19 +00004186 ndev->mtu = new_mtu;
4187
Ron Mercer7c734352009-10-19 03:32:19 +00004188 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004189 return 0;
4190 }
4191
Ron Mercer7c734352009-10-19 03:32:19 +00004192 status = ql_change_rx_buffers(qdev);
4193 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004194 netif_err(qdev, ifup, qdev->ndev,
4195 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004196 }
4197
4198 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004199}
4200
4201static struct net_device_stats *qlge_get_stats(struct net_device
4202 *ndev)
4203{
Ron Mercer885ee392009-11-03 13:49:31 +00004204 struct ql_adapter *qdev = netdev_priv(ndev);
4205 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4206 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4207 unsigned long pkts, mcast, dropped, errors, bytes;
4208 int i;
4209
4210 /* Get RX stats. */
4211 pkts = mcast = dropped = errors = bytes = 0;
4212 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4213 pkts += rx_ring->rx_packets;
4214 bytes += rx_ring->rx_bytes;
4215 dropped += rx_ring->rx_dropped;
4216 errors += rx_ring->rx_errors;
4217 mcast += rx_ring->rx_multicast;
4218 }
4219 ndev->stats.rx_packets = pkts;
4220 ndev->stats.rx_bytes = bytes;
4221 ndev->stats.rx_dropped = dropped;
4222 ndev->stats.rx_errors = errors;
4223 ndev->stats.multicast = mcast;
4224
4225 /* Get TX stats. */
4226 pkts = errors = bytes = 0;
4227 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4228 pkts += tx_ring->tx_packets;
4229 bytes += tx_ring->tx_bytes;
4230 errors += tx_ring->tx_errors;
4231 }
4232 ndev->stats.tx_packets = pkts;
4233 ndev->stats.tx_bytes = bytes;
4234 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004235 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004236}
4237
Ron Mercerf2c05002010-07-05 12:19:37 +00004238void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004239{
4240 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004241 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004242 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004243
Ron Mercercc288f52009-02-23 10:42:14 +00004244 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4245 if (status)
4246 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004247 /*
4248 * Set or clear promiscuous mode if a
4249 * transition is taking place.
4250 */
4251 if (ndev->flags & IFF_PROMISC) {
4252 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4253 if (ql_set_routing_reg
4254 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004255				netif_err(qdev, hw, qdev->ndev,
 4256					  "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004257 } else {
4258 set_bit(QL_PROMISCUOUS, &qdev->flags);
4259 }
4260 }
4261 } else {
4262 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4263 if (ql_set_routing_reg
4264 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004265				netif_err(qdev, hw, qdev->ndev,
 4266					  "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004267 } else {
4268 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4269 }
4270 }
4271 }
4272
4273 /*
4274 * Set or clear all multicast mode if a
4275 * transition is taking place.
4276 */
4277 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004278 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004279 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4280 if (ql_set_routing_reg
4281 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004282 netif_err(qdev, hw, qdev->ndev,
4283 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004284 } else {
4285 set_bit(QL_ALLMULTI, &qdev->flags);
4286 }
4287 }
4288 } else {
4289 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4290 if (ql_set_routing_reg
4291 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004292 netif_err(qdev, hw, qdev->ndev,
4293 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004294 } else {
4295 clear_bit(QL_ALLMULTI, &qdev->flags);
4296 }
4297 }
4298 }
4299
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004300 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004301 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4302 if (status)
4303 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004304 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004305 netdev_for_each_mc_addr(ha, ndev) {
4306 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004307 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004308 netif_err(qdev, hw, qdev->ndev,
4309 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004310 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004311 goto exit;
4312 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004313 i++;
4314 }
Ron Mercercc288f52009-02-23 10:42:14 +00004315 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004316 if (ql_set_routing_reg
4317 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004318 netif_err(qdev, hw, qdev->ndev,
4319 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004320 } else {
4321 set_bit(QL_ALLMULTI, &qdev->flags);
4322 }
4323 }
4324exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004325 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004326}
4327
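/* Set a new station address. The hardware copy lives in the CAM and
 * is only written while holding the MAC address semaphore.
 */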
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}

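/* Worker that bounces the interface after a queued ASIC error. It runs
 * under the rtnl lock; if the down/up cycle fails the device is closed
 * rather than left half-initialized.
 */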
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the PCIe function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

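/* Read the function id out of the status register, then derive the
 * port index, semaphore masks and mailbox addresses for this function,
 * and bind the chip-specific nic_ops (8012 vs. 8000).
 */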
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
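	/* Prefer a 64-bit DMA mask and fall back to 32-bit if the
	 * platform cannot provide it. QL_DMA64 later gates the
	 * NETIF_F_HIGHDMA feature flag in qlge_probe().
	 */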
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

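	/* The MPI coredump buffer is vmalloc'd only when the
	 * qlge_mpi_coredump module parameter requests it;
	 * qlge_force_coredump additionally allows forcing a dump.
	 */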
	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

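/* Periodic deferrable timer used as an EEH heartbeat: the register
 * read gives the PCI layer a chance to detect a dead bus. If the
 * channel is already offline the timer is not rearmed.
 */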
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
}

static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

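/* Thin wrappers around the send and rx-clean paths, exported for the
 * loopback self-test (presumably driven from the ethtool code) so it
 * does not have to reach into the static internals of this file.
 */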
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

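/* Power management: bring the adapter down, arm wake-on-LAN, and hand
 * the device to the PCI core in the requested low-power state.
 */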
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

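/* Shutdown reuses the suspend path so WOL is armed and the device is
 * quiesced before the system goes down.
 */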
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);