/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)
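/*
 * Note: the (_depth) - 1 above assumes that struct bnad_unmap_q already
 * embeds one struct bnad_skb_unmap slot at its tail, so a single KVA
 * block holds the queue header plus _depth unmap entries in total.
 */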

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

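/*
 * Unmap the skb's header and each page fragment from 'array' starting at
 * 'index', clearing the stored DMA addresses; returns the next unmap-queue
 * index. Freeing the skb itself is left to the caller.
 */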
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			       skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		     struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16 wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;

	/*
	 * Just return if TX is stopped. This check is useful when
	 * bnad_free_txbufs() runs from a tasklet that was scheduled
	 * before bnad_cb_tx_cleanup() cleared the BNAD_TXQ_TX_STARTED
	 * bit, but actually executes after the cleanup has run.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
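	/*
	 * 'wis' is the number of work items the hardware reports consumed;
	 * each completed packet accounts for BNA_TXQ_WI_NEEDED(1 + nr_frags)
	 * of them, so walk the unmap queue until all are accounted for.
	 */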
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
						unmap_cons, unmap_q->q_depth,
						skb, skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/*
 * Tx Free Tasklet function.
 * Frees buffers for all the tcbs in all the Tx objects.
 * Scheduled from the sending context, so that the heavyweight Tx lock
 * is not held for too long in the sending path.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

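	/*
	 * BNAD_TXQ_FREE_SENT serializes buffer freeing between this path
	 * and the Tx free tasklet; if another context already owns the
	 * bit, let that context do the freeing.
	 */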
	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

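	/*
	 * BNAD_RXQ_REFILL keeps refills single-threaded; the shift check
	 * only triggers an allocation burst once the number of free
	 * entries crosses the (power-of-two) refill threshold.
	 */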
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

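	/*
	 * Flag the fast path as busy; bnad_cb_rx_cleanup() spins on this
	 * bit so the Rx teardown cannot proceed while we are polling.
	 */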
	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

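		/*
		 * A CQ carries completions for up to two RxQs (see
		 * bnad_rxqs_per_cq); the rxq_id in the completion selects
		 * the small- or large-buffer RCB.
		 */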
		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else
			netif_receive_skb(skb);

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}

static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))
		return IRQ_NONE;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						printk(KERN_INFO "bna: %s %d "
						       "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround: if the first ioceth enable failed, we may have read
	 * a zero MAC address. Try to fetch the MAC address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

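	/* Give in-flight datapath activity time to drain before completing */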
	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

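	/*
	 * MSI-X vectors are laid out as: the mailbox vector(s) first, then
	 * one vector per TxQ across all Tx objects, then one vector per
	 * RxP; vector_start below indexes into that layout.
	 */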
	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				(bnad->num_tx * bnad->num_txq_per_tx) +
				txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
			ETH_ALEN);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

Rasesh Mody2be67142011-08-30 15:27:39 +00001669 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001670 if (rcvd >= budget)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001671 return rcvd;
1672
1673poll_exit:
 1674	napi_complete(napi);
1675
Rasesh Mody271e8b72011-08-30 15:27:40 +00001676 rx_ctrl->rx_complete++;
Rasesh Mody2be67142011-08-30 15:27:39 +00001677
1678 if (rx_ctrl->ccb)
Rasesh Mody271e8b72011-08-30 15:27:40 +00001679 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1680
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001681 return rcvd;
1682}
1683
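/* Per-poll budget handed to netif_napi_add() below; 64 is the conventional NAPI weight */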
Rasesh Mody2be67142011-08-30 15:27:39 +00001684#define BNAD_NAPI_POLL_QUOTA 64
1685static void
1686bnad_napi_init(struct bnad *bnad, u32 rx_id)
1687{
1688 struct bnad_rx_ctrl *rx_ctrl;
1689 int i;
1690
 1691	/* Initialize NAPI */
1692 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1693 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1694 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1695 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1696 }
1697}
1698
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001699static void
1700bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1701{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001702 struct bnad_rx_ctrl *rx_ctrl;
1703 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001704
 1705	/* Enable NAPI */
1706 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1707 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
Rasesh Modybe7fa322010-12-23 21:45:01 +00001708
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001709 napi_enable(&rx_ctrl->napi);
1710 }
1711}
1712
1713static void
1714bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1715{
1716 int i;
1717
1718 /* First disable and then clean up */
1719 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1720 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1721 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1722 }
1723}
1724
 1725 /* Should be called with conf_lock held */
1726void
Rasesh Mody078086f2011-08-08 16:21:39 +00001727bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001728{
1729 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1730 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1731 unsigned long flags;
1732
1733 if (!tx_info->tx)
1734 return;
1735
1736 init_completion(&bnad->bnad_completions.tx_comp);
1737 spin_lock_irqsave(&bnad->bna_lock, flags);
1738 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1739 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1740 wait_for_completion(&bnad->bnad_completions.tx_comp);
1741
1742 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1743 bnad_tx_msix_unregister(bnad, tx_info,
1744 bnad->num_txq_per_tx);
1745
Rasesh Mody2be67142011-08-30 15:27:39 +00001746	if (tx_id == 0)
1747 tasklet_kill(&bnad->tx_free_tasklet);
1748
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001749 spin_lock_irqsave(&bnad->bna_lock, flags);
1750 bna_tx_destroy(tx_info->tx);
1751 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1752
1753 tx_info->tx = NULL;
Rasesh Mody078086f2011-08-08 16:21:39 +00001754 tx_info->tx_id = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001755
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001756 bnad_tx_res_free(bnad, res_info);
1757}
1758
 1759 /* Should be called with conf_lock held */
1760int
Rasesh Mody078086f2011-08-08 16:21:39 +00001761bnad_setup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001762{
1763 int err;
1764 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1765 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1766 struct bna_intr_info *intr_info =
1767 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1768 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1769 struct bna_tx_event_cbfn tx_cbfn;
1770 struct bna_tx *tx;
1771 unsigned long flags;
1772
Rasesh Mody078086f2011-08-08 16:21:39 +00001773 tx_info->tx_id = tx_id;
1774
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001775 /* Initialize the Tx object configuration */
1776 tx_config->num_txq = bnad->num_txq_per_tx;
1777 tx_config->txq_depth = bnad->txq_depth;
1778 tx_config->tx_type = BNA_TX_T_REGULAR;
Rasesh Mody078086f2011-08-08 16:21:39 +00001779 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001780
1781 /* Initialize the tx event handlers */
1782 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1783 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1784 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1785 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1786 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1787
1788 /* Get BNA's resource requirement for one tx object */
1789 spin_lock_irqsave(&bnad->bna_lock, flags);
1790 bna_tx_res_req(bnad->num_txq_per_tx,
1791 bnad->txq_depth, res_info);
1792 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1793
1794 /* Fill Unmap Q memory requirements */
1795 BNAD_FILL_UNMAPQ_MEM_REQ(
1796 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1797 bnad->num_txq_per_tx,
1798 BNAD_TX_UNMAPQ_DEPTH);
1799
1800 /* Allocate resources */
1801 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1802 if (err)
1803 return err;
1804
1805 /* Ask BNA to create one Tx object, supplying required resources */
1806 spin_lock_irqsave(&bnad->bna_lock, flags);
1807 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1808 tx_info);
1809 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1810 if (!tx)
1811 goto err_return;
1812 tx_info->tx = tx;
1813
1814 /* Register ISR for the Tx object */
1815 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1816 err = bnad_tx_msix_register(bnad, tx_info,
1817 tx_id, bnad->num_txq_per_tx);
1818 if (err)
1819 goto err_return;
1820 }
1821
1822 spin_lock_irqsave(&bnad->bna_lock, flags);
1823 bna_tx_enable(tx);
1824 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1825
1826 return 0;
1827
1828err_return:
1829 bnad_tx_res_free(bnad, res_info);
1830 return err;
1831}
1832
1833/* Setup the rx config for bna_rx_create */
1834/* bnad decides the configuration */
1835static void
1836bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1837{
1838 rx_config->rx_type = BNA_RX_T_REGULAR;
1839 rx_config->num_paths = bnad->num_rxp_per_rx;
Rasesh Mody078086f2011-08-08 16:21:39 +00001840 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001841
1842 if (bnad->num_rxp_per_rx > 1) {
1843 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1844 rx_config->rss_config.hash_type =
Rasesh Mody078086f2011-08-08 16:21:39 +00001845 (BFI_ENET_RSS_IPV6 |
1846 BFI_ENET_RSS_IPV6_TCP |
1847 BFI_ENET_RSS_IPV4 |
1848 BFI_ENET_RSS_IPV4_TCP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001849 rx_config->rss_config.hash_mask =
1850 bnad->num_rxp_per_rx - 1;
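		/*
		 * e.g. 4 Rx paths -> hash_mask 0x3; this masking scheme
		 * assumes num_rxp_per_rx is a power of two.
		 */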
1851 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1852 sizeof(rx_config->rss_config.toeplitz_hash_key));
1853 } else {
1854 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1855 memset(&rx_config->rss_config, 0,
1856 sizeof(rx_config->rss_config));
1857 }
1858 rx_config->rxp_type = BNA_RXP_SLR;
1859 rx_config->q_depth = bnad->rxq_depth;
1860
1861 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1862
1863 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1864}
1865
Rasesh Mody2be67142011-08-30 15:27:39 +00001866static void
1867bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1868{
1869 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1870 int i;
1871
1872 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1873 rx_info->rx_ctrl[i].bnad = bnad;
1874}
1875
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001876/* Called with mutex_lock(&bnad->conf_mutex) held */
1877void
Rasesh Mody078086f2011-08-08 16:21:39 +00001878bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001879{
1880 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1881 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1882 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1883 unsigned long flags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00001884 int to_del = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001885
1886 if (!rx_info->rx)
1887 return;
1888
 1889	if (rx_id == 0) {
1890 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001891 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1892 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001893 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001894 to_del = 1;
1895 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001896 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001897 if (to_del)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001898 del_timer_sync(&bnad->dim_timer);
1899 }
1900
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001901 init_completion(&bnad->bnad_completions.rx_comp);
1902 spin_lock_irqsave(&bnad->bna_lock, flags);
1903 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1904 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1905 wait_for_completion(&bnad->bnad_completions.rx_comp);
1906
1907 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1908 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1909
Rasesh Mody2be67142011-08-30 15:27:39 +00001910 bnad_napi_disable(bnad, rx_id);
1911
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001912 spin_lock_irqsave(&bnad->bna_lock, flags);
1913 bna_rx_destroy(rx_info->rx);
1914 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1915
1916 rx_info->rx = NULL;
1917
1918 bnad_rx_res_free(bnad, res_info);
1919}
1920
1921/* Called with mutex_lock(&bnad->conf_mutex) held */
1922int
Rasesh Mody078086f2011-08-08 16:21:39 +00001923bnad_setup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001924{
1925 int err;
1926 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1927 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1928 struct bna_intr_info *intr_info =
1929 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1930 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1931 struct bna_rx_event_cbfn rx_cbfn;
1932 struct bna_rx *rx;
1933 unsigned long flags;
1934
Rasesh Mody078086f2011-08-08 16:21:39 +00001935 rx_info->rx_id = rx_id;
1936
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001937 /* Initialize the Rx object configuration */
1938 bnad_init_rx_config(bnad, rx_config);
1939
1940 /* Initialize the Rx event handlers */
1941 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
Rasesh Modybe7fa322010-12-23 21:45:01 +00001942 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001943 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1944 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1945 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1946 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1947
1948 /* Get BNA's resource requirement for one Rx object */
1949 spin_lock_irqsave(&bnad->bna_lock, flags);
1950 bna_rx_res_req(rx_config, res_info);
1951 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1952
1953 /* Fill Unmap Q memory requirements */
1954 BNAD_FILL_UNMAPQ_MEM_REQ(
1955 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1956 rx_config->num_paths +
1957 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1958 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1959
1960 /* Allocate resource */
1961 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1962 if (err)
1963 return err;
1964
Rasesh Mody2be67142011-08-30 15:27:39 +00001965 bnad_rx_ctrl_init(bnad, rx_id);
1966
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001967 /* Ask BNA to create one Rx object, supplying required resources */
1968 spin_lock_irqsave(&bnad->bna_lock, flags);
1969 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1970 rx_info);
1971 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1972 if (!rx)
1973 goto err_return;
1974 rx_info->rx = rx;
1975
Rasesh Mody2be67142011-08-30 15:27:39 +00001976	/*
 1977	 * Init NAPI, which leaves its state set to NAPI_STATE_SCHED,
 1978	 * so the IRQ handler cannot schedule NAPI at this point.
 1979	 */
1980 bnad_napi_init(bnad, rx_id);
1981
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001982 /* Register ISR for the Rx object */
1983 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1984 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1985 rx_config->num_paths);
1986 if (err)
1987 goto err_return;
1988 }
1989
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001990 spin_lock_irqsave(&bnad->bna_lock, flags);
 1991	if (rx_id == 0) {
1992 /* Set up Dynamic Interrupt Moderation Vector */
1993 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1994 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1995
1996 /* Enable VLAN filtering only on the default Rx */
1997 bna_rx_vlanfilter_enable(rx);
1998
1999 /* Start the DIM timer */
2000 bnad_dim_timer_start(bnad);
2001 }
2002
2003 bna_rx_enable(rx);
2004 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2005
Rasesh Mody2be67142011-08-30 15:27:39 +00002006 /* Enable scheduling of NAPI */
2007 bnad_napi_enable(bnad, rx_id);
2008
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002009 return 0;
2010
2011err_return:
2012 bnad_cleanup_rx(bnad, rx_id);
2013 return err;
2014}
2015
2016/* Called with conf_lock & bnad->bna_lock held */
2017void
2018bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2019{
2020 struct bnad_tx_info *tx_info;
2021
2022 tx_info = &bnad->tx_info[0];
2023 if (!tx_info->tx)
2024 return;
2025
2026 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2027}
2028
2029/* Called with conf_lock & bnad->bna_lock held */
2030void
2031bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2032{
2033 struct bnad_rx_info *rx_info;
Rasesh Mody0120b992011-07-22 08:07:41 +00002034 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002035
2036 for (i = 0; i < bnad->num_rx; i++) {
2037 rx_info = &bnad->rx_info[i];
2038 if (!rx_info->rx)
2039 continue;
2040 bna_rx_coalescing_timeo_set(rx_info->rx,
2041 bnad->rx_coalescing_timeo);
2042 }
2043}
2044
2045/*
2046 * Called with bnad->bna_lock held
2047 */
2048static int
2049bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2050{
2051 int ret;
2052
2053 if (!is_valid_ether_addr(mac_addr))
2054 return -EADDRNOTAVAIL;
2055
2056 /* If datapath is down, pretend everything went through */
2057 if (!bnad->rx_info[0].rx)
2058 return 0;
2059
2060 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2061 if (ret != BNA_CB_SUCCESS)
2062 return -EADDRNOTAVAIL;
2063
2064 return 0;
2065}
2066
2067/* Should be called with conf_lock held */
2068static int
2069bnad_enable_default_bcast(struct bnad *bnad)
2070{
2071 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2072 int ret;
2073 unsigned long flags;
2074
2075 init_completion(&bnad->bnad_completions.mcast_comp);
2076
2077 spin_lock_irqsave(&bnad->bna_lock, flags);
2078 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2079 bnad_cb_rx_mcast_add);
2080 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2081
2082 if (ret == BNA_CB_SUCCESS)
2083 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2084 else
2085 return -ENODEV;
2086
2087 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2088 return -ENODEV;
2089
2090 return 0;
2091}
2092
Rasesh Modyaad75b62010-12-23 21:45:08 +00002093/* Called with bnad_conf_lock() held */
2094static void
2095bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2096{
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002097 u16 vid;
Rasesh Modyaad75b62010-12-23 21:45:08 +00002098 unsigned long flags;
2099
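	/* active_vlans is sized VLAN_N_VID bits; it must match the adapter's VLAN id space */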
Rasesh Mody078086f2011-08-08 16:21:39 +00002100	BUG_ON(VLAN_N_VID != BFI_ENET_VLAN_ID_MAX);
Rasesh Modyaad75b62010-12-23 21:45:08 +00002101
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002102 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
Rasesh Modyaad75b62010-12-23 21:45:08 +00002103 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002104 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
Rasesh Modyaad75b62010-12-23 21:45:08 +00002105 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2106 }
2107}
2108
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002109/* Statistics utilities */
2110void
Eric Dumazet250e0612010-09-02 12:45:02 -07002111bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002112{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002113 int i, j;
2114
2115 for (i = 0; i < bnad->num_rx; i++) {
2116 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2117 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002118 stats->rx_packets += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002119 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002120 stats->rx_bytes += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002121 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2122 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2123 bnad->rx_info[i].rx_ctrl[j].ccb->
2124 rcb[1]->rxq) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002125 stats->rx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002126 bnad->rx_info[i].rx_ctrl[j].
2127 ccb->rcb[1]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002128 stats->rx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002129 bnad->rx_info[i].rx_ctrl[j].
2130 ccb->rcb[1]->rxq->rx_bytes;
2131 }
2132 }
2133 }
2134 }
2135 for (i = 0; i < bnad->num_tx; i++) {
2136 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2137 if (bnad->tx_info[i].tcb[j]) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002138 stats->tx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002139 bnad->tx_info[i].tcb[j]->txq->tx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002140 stats->tx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002141 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2142 }
2143 }
2144 }
2145}
2146
2147/*
2148 * Must be called with the bna_lock held.
2149 */
2150void
Eric Dumazet250e0612010-09-02 12:45:02 -07002151bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002152{
Rasesh Mody078086f2011-08-08 16:21:39 +00002153 struct bfi_enet_stats_mac *mac_stats;
2154 u32 bmap;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002155 int i;
2156
Rasesh Mody078086f2011-08-08 16:21:39 +00002157 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
Eric Dumazet250e0612010-09-02 12:45:02 -07002158 stats->rx_errors =
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002159 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2160 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2161 mac_stats->rx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002162 stats->tx_errors = mac_stats->tx_fcs_error +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002163 mac_stats->tx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002164 stats->rx_dropped = mac_stats->rx_drop;
2165 stats->tx_dropped = mac_stats->tx_drop;
2166 stats->multicast = mac_stats->rx_multicast;
2167 stats->collisions = mac_stats->tx_total_collision;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002168
Eric Dumazet250e0612010-09-02 12:45:02 -07002169 stats->rx_length_errors = mac_stats->rx_frame_length_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002170
2171 /* receive ring buffer overflow ?? */
2172
Eric Dumazet250e0612010-09-02 12:45:02 -07002173 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2174 stats->rx_frame_errors = mac_stats->rx_alignment_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002175	/* receiver FIFO overrun */
Rasesh Mody078086f2011-08-08 16:21:39 +00002176 bmap = bna_rx_rid_mask(&bnad->bna);
2177 for (i = 0; bmap; i++) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002178 if (bmap & 1) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002179 stats->rx_fifo_errors +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002180 bnad->stats.bna_stats->
Rasesh Mody078086f2011-08-08 16:21:39 +00002181 hw_stats.rxf_stats[i].frame_drops;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002182 break;
2183 }
2184 bmap >>= 1;
2185 }
2186}
2187
2188static void
2189bnad_mbox_irq_sync(struct bnad *bnad)
2190{
2191 u32 irq;
2192 unsigned long flags;
2193
2194 spin_lock_irqsave(&bnad->bna_lock, flags);
2195 if (bnad->cfg_flags & BNAD_CF_MSIX)
Rasesh Mody8811e262011-07-22 08:07:44 +00002196 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002197 else
2198 irq = bnad->pcidev->irq;
2199 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2200
2201 synchronize_irq(irq);
2202}
2203
2204/* Utility used by bnad_start_xmit, for doing TSO */
2205static int
2206bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2207{
2208 int err;
2209
 2210	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2211 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2212 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2213 if (skb_header_cloned(skb)) {
2214 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2215 if (err) {
2216 BNAD_UPDATE_CTR(bnad, tso_err);
2217 return err;
2218 }
2219 }
2220
2221 /*
2222 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2223 * excluding the length field.
2224 */
2225 if (skb->protocol == htons(ETH_P_IP)) {
2226 struct iphdr *iph = ip_hdr(skb);
2227
 2228		/* Zeroed, as is conventional for TSO; the NIC rewrites them per segment */
2229 iph->tot_len = 0;
2230 iph->check = 0;
2231
2232 tcp_hdr(skb)->check =
2233 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2234 IPPROTO_TCP, 0);
2235 BNAD_UPDATE_CTR(bnad, tso4);
2236 } else {
2237 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2238
 2239		BUG_ON(skb->protocol != htons(ETH_P_IPV6));
2240 ipv6h->payload_len = 0;
2241 tcp_hdr(skb)->check =
2242 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2243 IPPROTO_TCP, 0);
2244 BNAD_UPDATE_CTR(bnad, tso6);
2245 }
2246
2247 return 0;
2248}
2249
2250/*
2251 * Initialize Q numbers depending on Rx Paths
2252 * Called with bnad->bna_lock held, because of cfg_flags
2253 * access.
2254 */
2255static void
2256bnad_q_num_init(struct bnad *bnad)
2257{
2258 int rxps;
2259
2260 rxps = min((uint)num_online_cpus(),
Rasesh Mody772b5232011-08-30 15:27:37 +00002261 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002262
2263 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2264 rxps = 1; /* INTx */
2265
2266 bnad->num_rx = 1;
2267 bnad->num_tx = 1;
2268 bnad->num_rxp_per_rx = rxps;
2269 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2270}
2271
2272/*
 2273 * Adjusts the Q numbers, given a number of MSI-X vectors.
 2274 * Gives preference to RSS over Tx priority queues; in that case
 2275 * just one Tx queue is used.
 2276 * Called with bnad->bna_lock held because of cfg_flags access.
2277 */
2278static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002279bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002280{
2281 bnad->num_txq_per_tx = 1;
2282 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2283 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2284 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2285 bnad->num_rxp_per_rx = msix_vectors -
2286 (bnad->num_tx * bnad->num_txq_per_tx) -
2287 BNAD_MAILBOX_MSIX_VECTORS;
2288 } else
2289 bnad->num_rxp_per_rx = 1;
2290}
2291
Rasesh Mody078086f2011-08-08 16:21:39 +00002292/* Enable / disable ioceth */
2293static int
2294bnad_ioceth_disable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002295{
2296 unsigned long flags;
Rasesh Mody078086f2011-08-08 16:21:39 +00002297 int err = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002298
2299 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002300 init_completion(&bnad->bnad_completions.ioc_comp);
2301 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002302 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2303
Rasesh Mody078086f2011-08-08 16:21:39 +00002304 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2305 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2306
2307 err = bnad->bnad_completions.ioc_comp_status;
2308 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002309}
2310
2311static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002312bnad_ioceth_enable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002313{
2314 int err = 0;
2315 unsigned long flags;
2316
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002317 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002318 init_completion(&bnad->bnad_completions.ioc_comp);
2319 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2320 bna_ioceth_enable(&bnad->bna.ioceth);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2322
Rasesh Mody078086f2011-08-08 16:21:39 +00002323 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2324 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002325
Rasesh Mody078086f2011-08-08 16:21:39 +00002326 err = bnad->bnad_completions.ioc_comp_status;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002327
2328 return err;
2329}
2330
2331/* Free BNA resources */
2332static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002333bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2334 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002335{
2336 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002337
Rasesh Mody078086f2011-08-08 16:21:39 +00002338 for (i = 0; i < res_val_max; i++)
2339 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002340}
2341
2342/* Allocates memory and interrupt resources for BNA */
2343static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002344bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2345 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002346{
2347 int i, err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002348
Rasesh Mody078086f2011-08-08 16:21:39 +00002349 for (i = 0; i < res_val_max; i++) {
2350 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002351 if (err)
2352 goto err_return;
2353 }
2354 return 0;
2355
2356err_return:
Rasesh Mody078086f2011-08-08 16:21:39 +00002357 bnad_res_free(bnad, res_info, res_val_max);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002358 return err;
2359}
2360
2361/* Interrupt enable / disable */
2362static void
2363bnad_enable_msix(struct bnad *bnad)
2364{
2365 int i, ret;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002366 unsigned long flags;
2367
2368 spin_lock_irqsave(&bnad->bna_lock, flags);
2369 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2370 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2371 return;
2372 }
2373 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2374
2375 if (bnad->msix_table)
2376 return;
2377
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002378 bnad->msix_table =
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002379 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002380
2381 if (!bnad->msix_table)
2382 goto intx_mode;
2383
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002384 for (i = 0; i < bnad->msix_num; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002385 bnad->msix_table[i].entry = i;
2386
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002387 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002388 if (ret > 0) {
2389 /* Not enough MSI-X vectors. */
2390
2391 spin_lock_irqsave(&bnad->bna_lock, flags);
 2392		/* ret = number of vectors actually granted */
Rasesh Mody271e8b72011-08-30 15:27:40 +00002393 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2394 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002395 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2396
Rasesh Mody271e8b72011-08-30 15:27:40 +00002397 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002398 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002399
Rasesh Mody078086f2011-08-08 16:21:39 +00002400 if (bnad->msix_num > ret)
2401 goto intx_mode;
2402
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002403 /* Try once more with adjusted numbers */
2404 /* If this fails, fall back to INTx */
2405 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002406 bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002407 if (ret)
2408 goto intx_mode;
2409
2410 } else if (ret < 0)
2411 goto intx_mode;
Rasesh Mody078086f2011-08-08 16:21:39 +00002412
2413 pci_intx(bnad->pcidev, 0);
2414
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002415 return;
2416
2417intx_mode:
2418
2419 kfree(bnad->msix_table);
2420 bnad->msix_table = NULL;
2421 bnad->msix_num = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002422 spin_lock_irqsave(&bnad->bna_lock, flags);
2423 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2424 bnad_q_num_init(bnad);
2425 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2426}
2427
2428static void
2429bnad_disable_msix(struct bnad *bnad)
2430{
2431 u32 cfg_flags;
2432 unsigned long flags;
2433
2434 spin_lock_irqsave(&bnad->bna_lock, flags);
2435 cfg_flags = bnad->cfg_flags;
2436 if (bnad->cfg_flags & BNAD_CF_MSIX)
2437 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2438 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2439
2440 if (cfg_flags & BNAD_CF_MSIX) {
2441 pci_disable_msix(bnad->pcidev);
2442 kfree(bnad->msix_table);
2443 bnad->msix_table = NULL;
2444 }
2445}
2446
2447/* Netdev entry points */
2448static int
2449bnad_open(struct net_device *netdev)
2450{
2451 int err;
2452 struct bnad *bnad = netdev_priv(netdev);
2453 struct bna_pause_config pause_config;
2454 int mtu;
2455 unsigned long flags;
2456
2457 mutex_lock(&bnad->conf_mutex);
2458
2459 /* Tx */
2460 err = bnad_setup_tx(bnad, 0);
2461 if (err)
2462 goto err_return;
2463
2464 /* Rx */
2465 err = bnad_setup_rx(bnad, 0);
2466 if (err)
2467 goto cleanup_tx;
2468
2469 /* Port */
2470 pause_config.tx_pause = 0;
2471 pause_config.rx_pause = 0;
2472
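	/* On-wire frame size: e.g. a default 1500-byte MTU gives
	 * 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (ETH_FCS_LEN) = 1522.
	 */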
Rasesh Mody078086f2011-08-08 16:21:39 +00002473 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002474
2475 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002476 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2477 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2478 bna_enet_enable(&bnad->bna.enet);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002479 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2480
2481 /* Enable broadcast */
2482 bnad_enable_default_bcast(bnad);
2483
Rasesh Modyaad75b62010-12-23 21:45:08 +00002484 /* Restore VLANs, if any */
2485 bnad_restore_vlans(bnad, 0);
2486
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002487 /* Set the UCAST address */
2488 spin_lock_irqsave(&bnad->bna_lock, flags);
2489 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2490 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2491
2492 /* Start the stats timer */
2493 bnad_stats_timer_start(bnad);
2494
2495 mutex_unlock(&bnad->conf_mutex);
2496
2497 return 0;
2498
2499cleanup_tx:
2500 bnad_cleanup_tx(bnad, 0);
2501
2502err_return:
2503 mutex_unlock(&bnad->conf_mutex);
2504 return err;
2505}
2506
2507static int
2508bnad_stop(struct net_device *netdev)
2509{
2510 struct bnad *bnad = netdev_priv(netdev);
2511 unsigned long flags;
2512
2513 mutex_lock(&bnad->conf_mutex);
2514
2515 /* Stop the stats timer */
2516 bnad_stats_timer_stop(bnad);
2517
Rasesh Mody078086f2011-08-08 16:21:39 +00002518 init_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002519
2520 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002521 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2522 bnad_cb_enet_disabled);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002523 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2524
Rasesh Mody078086f2011-08-08 16:21:39 +00002525 wait_for_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002526
2527 bnad_cleanup_tx(bnad, 0);
2528 bnad_cleanup_rx(bnad, 0);
2529
2530 /* Synchronize mailbox IRQ */
2531 bnad_mbox_irq_sync(bnad);
2532
2533 mutex_unlock(&bnad->conf_mutex);
2534
2535 return 0;
2536}
2537
2538/* TX */
2539/*
2540 * bnad_start_xmit : Netdev entry point for Transmit
2541 * Called under lock held by net_device
2542 */
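/*
 * Rough flow of the routine below: validate the skb, reclaim completed
 * Tx buffers if the queue looks full, build the header work item
 * (VLAN / TSO / checksum flags), DMA-map the linear data and each page
 * fragment into successive vectors, then ring the TxQ doorbell.
 */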
2543static netdev_tx_t
2544bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2545{
2546 struct bnad *bnad = netdev_priv(netdev);
Rasesh Mody078086f2011-08-08 16:21:39 +00002547 u32 txq_id = 0;
2548 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002549
Rasesh Mody0120b992011-07-22 08:07:41 +00002550 u16 txq_prod, vlan_tag = 0;
2551 u32 unmap_prod, wis, wis_used, wi_range;
2552 u32 vectors, vect_id, i, acked;
Rasesh Mody0120b992011-07-22 08:07:41 +00002553 int err;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002554 unsigned int len;
2555 u32 gso_size;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002556
Rasesh Mody078086f2011-08-08 16:21:39 +00002557 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
Rasesh Mody0120b992011-07-22 08:07:41 +00002558 dma_addr_t dma_addr;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002559 struct bna_txq_entry *txqent;
Rasesh Mody078086f2011-08-08 16:21:39 +00002560 u16 flags;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002561
Rasesh Mody271e8b72011-08-30 15:27:40 +00002562 if (unlikely(skb->len <= ETH_HLEN)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002563 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002564 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2565 return NETDEV_TX_OK;
2566 }
2567 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2568 dev_kfree_skb(skb);
2569 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2570 return NETDEV_TX_OK;
2571 }
2572 if (unlikely(skb_headlen(skb) == 0)) {
2573 dev_kfree_skb(skb);
2574 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002575 return NETDEV_TX_OK;
2576 }
2577
Rasesh Modybe7fa322010-12-23 21:45:01 +00002578 /*
2579 * Takes care of the Tx that is scheduled between clearing the flag
Rasesh Mody078086f2011-08-08 16:21:39 +00002580	 * and the netif_stop_queue() call.
Rasesh Modybe7fa322010-12-23 21:45:01 +00002581 */
2582 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2583 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002584 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002585 return NETDEV_TX_OK;
2586 }
2587
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002588 vectors = 1 + skb_shinfo(skb)->nr_frags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002589 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002590 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002591 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002592 return NETDEV_TX_OK;
2593 }
2594 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
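	/*
	 * e.g. linear data plus 6 frags = 7 vectors, hence 2 work items:
	 * the header WI plus one BNA_TXQ_WI_EXTENSION WI.
	 */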
2595 acked = 0;
Rasesh Mody078086f2011-08-08 16:21:39 +00002596 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2597 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002598 if ((u16) (*tcb->hw_consumer_index) !=
2599 tcb->consumer_index &&
2600 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2601 acked = bnad_free_txbufs(bnad, tcb);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002602 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2603 bna_ib_ack(tcb->i_dbell, acked);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002604 smp_mb__before_clear_bit();
2605 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2606 } else {
2607 netif_stop_queue(netdev);
2608 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2609 }
2610
2611 smp_mb();
2612 /*
2613 * Check again to deal with race condition between
2614 * netif_stop_queue here, and netif_wake_queue in
2615 * interrupt handler which is not inside netif tx lock.
2616 */
2617 if (likely
2618 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2619 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2620 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2621 return NETDEV_TX_BUSY;
2622 } else {
2623 netif_wake_queue(netdev);
2624 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2625 }
2626 }
2627
2628 unmap_prod = unmap_q->producer_index;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002629 flags = 0;
2630
2631 txq_prod = tcb->producer_index;
2632 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
 2633	BUG_ON(wi_range > tcb->q_depth);
2634 txqent->hdr.wi.reserved = 0;
2635 txqent->hdr.wi.num_vectors = vectors;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002636
Jesse Grosseab6d182010-10-20 13:56:03 +00002637 if (vlan_tx_tag_present(skb)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002638 vlan_tag = (u16) vlan_tx_tag_get(skb);
2639 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2640 }
2641 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
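		/* 802.1Q TCI layout: PCP occupies bits 15..13; keep the low
		 * 13 bits (DEI + VID) and overwrite the PCP with the TxQ's
		 * CEE priority.
		 */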
2642 vlan_tag =
2643 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2644 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2645 }
2646
2647 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2648
2649 if (skb_is_gso(skb)) {
Rasesh Mody271e8b72011-08-30 15:27:40 +00002650 gso_size = skb_shinfo(skb)->gso_size;
2651
2652 if (unlikely(gso_size > netdev->mtu)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002653 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002654 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002655 return NETDEV_TX_OK;
2656 }
Rasesh Mody271e8b72011-08-30 15:27:40 +00002657 if (unlikely((gso_size + skb_transport_offset(skb) +
2658 tcp_hdrlen(skb)) >= skb->len)) {
2659 txqent->hdr.wi.opcode =
2660 __constant_htons(BNA_TXQ_WI_SEND);
2661 txqent->hdr.wi.lso_mss = 0;
2662 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2663 } else {
2664 txqent->hdr.wi.opcode =
2665 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2666 txqent->hdr.wi.lso_mss = htons(gso_size);
2667 }
2668
2669 err = bnad_tso_prepare(bnad, skb);
2670 if (unlikely(err)) {
2671 dev_kfree_skb(skb);
2672 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2673 return NETDEV_TX_OK;
2674 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002675 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2676 txqent->hdr.wi.l4_hdr_size_n_offset =
2677 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2678 (tcp_hdrlen(skb) >> 2,
2679 skb_transport_offset(skb)));
Rasesh Mody271e8b72011-08-30 15:27:40 +00002680 } else {
2681 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002682 txqent->hdr.wi.lso_mss = 0;
2683
Rasesh Mody271e8b72011-08-30 15:27:40 +00002684 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2685 dev_kfree_skb(skb);
2686 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2687 return NETDEV_TX_OK;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002688 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002689
Rasesh Mody271e8b72011-08-30 15:27:40 +00002690 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2691 u8 proto = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002692
Rasesh Mody271e8b72011-08-30 15:27:40 +00002693 if (skb->protocol == __constant_htons(ETH_P_IP))
2694 proto = ip_hdr(skb)->protocol;
2695 else if (skb->protocol ==
2696 __constant_htons(ETH_P_IPV6)) {
2697 /* nexthdr may not be TCP immediately. */
2698 proto = ipv6_hdr(skb)->nexthdr;
2699 }
2700 if (proto == IPPROTO_TCP) {
2701 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2702 txqent->hdr.wi.l4_hdr_size_n_offset =
2703 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2704 (0, skb_transport_offset(skb)));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002705
Rasesh Mody271e8b72011-08-30 15:27:40 +00002706 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002707
Rasesh Mody271e8b72011-08-30 15:27:40 +00002708 if (unlikely(skb_headlen(skb) <
2709 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2710 dev_kfree_skb(skb);
2711 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2712 return NETDEV_TX_OK;
2713 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002714
Rasesh Mody271e8b72011-08-30 15:27:40 +00002715 } else if (proto == IPPROTO_UDP) {
2716 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2717 txqent->hdr.wi.l4_hdr_size_n_offset =
2718 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2719 (0, skb_transport_offset(skb)));
2720
2721 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2722 if (unlikely(skb_headlen(skb) <
2723 skb_transport_offset(skb) +
2724 sizeof(struct udphdr))) {
2725 dev_kfree_skb(skb);
2726 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2727 return NETDEV_TX_OK;
2728 }
2729 } else {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002730 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002731 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002732 return NETDEV_TX_OK;
2733 }
Rasesh Mody271e8b72011-08-30 15:27:40 +00002734 } else {
2735 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002736 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002737 }
2738
2739 txqent->hdr.wi.flags = htons(flags);
2740
2741 txqent->hdr.wi.frame_length = htonl(skb->len);
2742
2743 unmap_q->unmap_array[unmap_prod].skb = skb;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002744 len = skb_headlen(skb);
2745 txqent->vector[0].length = htons(len);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002746 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2747 skb_headlen(skb), DMA_TO_DEVICE);
2748 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002749 dma_addr);
2750
Rasesh Mody271e8b72011-08-30 15:27:40 +00002751 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002752 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2753
Rasesh Mody271e8b72011-08-30 15:27:40 +00002754 vect_id = 0;
2755 wis_used = 1;
2756
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002757 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2758 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
Rasesh Mody078086f2011-08-08 16:21:39 +00002759 u16 size = frag->size;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002760
Rasesh Mody271e8b72011-08-30 15:27:40 +00002761 if (unlikely(size == 0)) {
2762 unmap_prod = unmap_q->producer_index;
2763
2764 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2765 unmap_q->unmap_array,
2766 unmap_prod, unmap_q->q_depth, skb,
2767 i);
2768 dev_kfree_skb(skb);
2769 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2770 return NETDEV_TX_OK;
2771 }
2772
2773 len += size;
2774
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002775 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2776 vect_id = 0;
2777 if (--wi_range)
2778 txqent++;
2779 else {
2780 BNA_QE_INDX_ADD(txq_prod, wis_used,
2781 tcb->q_depth);
2782 wis_used = 0;
2783 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2784 txqent, wi_range);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002785 }
2786 wis_used++;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002787 txqent->hdr.wi_ext.opcode =
2788 __constant_htons(BNA_TXQ_WI_EXTENSION);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002789 }
2790
 2791		BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
2792 txqent->vector[vect_id].length = htons(size);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002793 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2794 frag->page_offset, size, DMA_TO_DEVICE);
2795 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002796 dma_addr);
2797 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2798 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2799 }
2800
Rasesh Mody271e8b72011-08-30 15:27:40 +00002801 if (unlikely(len != skb->len)) {
2802 unmap_prod = unmap_q->producer_index;
2803
2804 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2805 unmap_q->unmap_array, unmap_prod,
2806 unmap_q->q_depth, skb,
2807 skb_shinfo(skb)->nr_frags);
2808 dev_kfree_skb(skb);
2809 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2810 return NETDEV_TX_OK;
2811 }
2812
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002813 unmap_q->producer_index = unmap_prod;
2814 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2815 tcb->producer_index = txq_prod;
2816
2817 smp_mb();
Rasesh Modybe7fa322010-12-23 21:45:01 +00002818
2819 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2820 return NETDEV_TX_OK;
2821
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002822 bna_txq_prod_indx_doorbell(tcb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002823 smp_mb();
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002824
2825 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2826 tasklet_schedule(&bnad->tx_free_tasklet);
2827
2828 return NETDEV_TX_OK;
2829}
2830
2831/*
 2832 * A spin_lock is used to synchronize reading of the stats structures,
 2833 * which are written by BNA under the same lock.
2834 */
Eric Dumazet250e0612010-09-02 12:45:02 -07002835static struct rtnl_link_stats64 *
2836bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002837{
2838 struct bnad *bnad = netdev_priv(netdev);
2839 unsigned long flags;
2840
2841 spin_lock_irqsave(&bnad->bna_lock, flags);
2842
Eric Dumazet250e0612010-09-02 12:45:02 -07002843 bnad_netdev_qstats_fill(bnad, stats);
2844 bnad_netdev_hwstats_fill(bnad, stats);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002845
2846 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2847
Eric Dumazet250e0612010-09-02 12:45:02 -07002848 return stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002849}
2850
2851static void
2852bnad_set_rx_mode(struct net_device *netdev)
2853{
2854 struct bnad *bnad = netdev_priv(netdev);
2855 u32 new_mask, valid_mask;
2856 unsigned long flags;
2857
2858 spin_lock_irqsave(&bnad->bna_lock, flags);
2859
2860 new_mask = valid_mask = 0;
2861
2862 if (netdev->flags & IFF_PROMISC) {
2863 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2864 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2865 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2866 bnad->cfg_flags |= BNAD_CF_PROMISC;
2867 }
2868 } else {
2869 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2870 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2871 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2872 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2873 }
2874 }
2875
2876 if (netdev->flags & IFF_ALLMULTI) {
2877 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2878 new_mask |= BNA_RXMODE_ALLMULTI;
2879 valid_mask |= BNA_RXMODE_ALLMULTI;
2880 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2881 }
2882 } else {
2883 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2884 new_mask &= ~BNA_RXMODE_ALLMULTI;
2885 valid_mask |= BNA_RXMODE_ALLMULTI;
2886 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2887 }
2888 }
2889
Rasesh Mody271e8b72011-08-30 15:27:40 +00002890 if (bnad->rx_info[0].rx == NULL)
2891 goto unlock;
2892
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002893 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2894
2895 if (!netdev_mc_empty(netdev)) {
2896 u8 *mcaddr_list;
2897 int mc_count = netdev_mc_count(netdev);
2898
2899 /* Index 0 holds the broadcast address */
2900 mcaddr_list =
2901 kzalloc((mc_count + 1) * ETH_ALEN,
2902 GFP_ATOMIC);
2903 if (!mcaddr_list)
Jiri Slabyca1cef32010-09-04 02:08:41 +00002904 goto unlock;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002905
2906 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2907
2908 /* Copy rest of the MC addresses */
2909 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2910
2911 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2912 mcaddr_list, NULL);
2913
2914 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2915 kfree(mcaddr_list);
2916 }
Jiri Slabyca1cef32010-09-04 02:08:41 +00002917unlock:
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002918 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2919}
2920
2921/*
2922 * bna_lock is used to sync writes to netdev->addr
2923 * conf_lock cannot be used since this call may be made
2924 * in a non-blocking context.
2925 */
2926static int
2927bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2928{
2929 int err;
2930 struct bnad *bnad = netdev_priv(netdev);
2931 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2932 unsigned long flags;
2933
2934 spin_lock_irqsave(&bnad->bna_lock, flags);
2935
2936 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2937
2938 if (!err)
2939 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2940
2941 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2942
2943 return err;
2944}
2945
2946static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002947bnad_mtu_set(struct bnad *bnad, int mtu)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002948{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002949 unsigned long flags;
2950
Rasesh Mody078086f2011-08-08 16:21:39 +00002951 init_completion(&bnad->bnad_completions.mtu_comp);
2952
2953 spin_lock_irqsave(&bnad->bna_lock, flags);
2954 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2955 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2956
2957 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2958
2959 return bnad->bnad_completions.mtu_comp_status;
2960}
2961
2962static int
2963bnad_change_mtu(struct net_device *netdev, int new_mtu)
2964{
2965 int err, mtu = netdev->mtu;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002966 struct bnad *bnad = netdev_priv(netdev);
2967
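	/* Lower bound: new_mtu + ETH_HLEN (14) must reach ETH_ZLEN (60),
	 * i.e. the minimum acceptable MTU is 46 bytes.
	 */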
2968 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2969 return -EINVAL;
2970
2971 mutex_lock(&bnad->conf_mutex);
2972
2973 netdev->mtu = new_mtu;
2974
Rasesh Mody078086f2011-08-08 16:21:39 +00002975 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2976 err = bnad_mtu_set(bnad, mtu);
2977 if (err)
2978 err = -EBUSY;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002979
2980 mutex_unlock(&bnad->conf_mutex);
2981 return err;
2982}
2983
2984static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002985bnad_vlan_rx_add_vid(struct net_device *netdev,
2986 unsigned short vid)
2987{
2988 struct bnad *bnad = netdev_priv(netdev);
2989 unsigned long flags;
2990
2991 if (!bnad->rx_info[0].rx)
2992 return;
2993
2994 mutex_lock(&bnad->conf_mutex);
2995
2996 spin_lock_irqsave(&bnad->bna_lock, flags);
2997 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002998 set_bit(vid, bnad->active_vlans);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002999 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3000
3001 mutex_unlock(&bnad->conf_mutex);
3002}
3003
3004static void
3005bnad_vlan_rx_kill_vid(struct net_device *netdev,
3006 unsigned short vid)
3007{
3008 struct bnad *bnad = netdev_priv(netdev);
3009 unsigned long flags;
3010
3011 if (!bnad->rx_info[0].rx)
3012 return;
3013
3014 mutex_lock(&bnad->conf_mutex);
3015
3016 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00003017 clear_bit(vid, bnad->active_vlans);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003018 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3019 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3020
3021 mutex_unlock(&bnad->conf_mutex);
3022}
3023
3024#ifdef CONFIG_NET_POLL_CONTROLLER
3025static void
3026bnad_netpoll(struct net_device *netdev)
3027{
3028 struct bnad *bnad = netdev_priv(netdev);
3029 struct bnad_rx_info *rx_info;
3030 struct bnad_rx_ctrl *rx_ctrl;
3031 u32 curr_mask;
3032 int i, j;
3033
3034 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3035 bna_intx_disable(&bnad->bna, curr_mask);
3036 bnad_isr(bnad->pcidev->irq, netdev);
3037 bna_intx_enable(&bnad->bna, curr_mask);
3038 } else {
3039 for (i = 0; i < bnad->num_rx; i++) {
3040 rx_info = &bnad->rx_info[i];
3041 if (!rx_info->rx)
3042 continue;
3043 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3044 rx_ctrl = &rx_info->rx_ctrl[j];
Rasesh Mody271e8b72011-08-30 15:27:40 +00003045 if (rx_ctrl->ccb)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003046 bnad_netif_rx_schedule_poll(bnad,
3047 rx_ctrl->ccb);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003048 }
3049 }
3050 }
3051}
3052#endif
3053
3054static const struct net_device_ops bnad_netdev_ops = {
3055 .ndo_open = bnad_open,
3056 .ndo_stop = bnad_stop,
3057 .ndo_start_xmit = bnad_start_xmit,
Eric Dumazet250e0612010-09-02 12:45:02 -07003058 .ndo_get_stats64 = bnad_get_stats64,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003059 .ndo_set_rx_mode = bnad_set_rx_mode,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003060 .ndo_validate_addr = eth_validate_addr,
3061 .ndo_set_mac_address = bnad_set_mac_address,
3062 .ndo_change_mtu = bnad_change_mtu,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003063 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3064 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3065#ifdef CONFIG_NET_POLL_CONTROLLER
3066 .ndo_poll_controller = bnad_netpoll
3067#endif
3068};
3069
3070static void
3071bnad_netdev_init(struct bnad *bnad, bool using_dac)
3072{
3073 struct net_device *netdev = bnad->netdev;
3074
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003075 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3076 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3077 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003078
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003079 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3080 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3081 NETIF_F_TSO | NETIF_F_TSO6;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003082
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003083 netdev->features |= netdev->hw_features |
3084 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003085
3086 if (using_dac)
3087 netdev->features |= NETIF_F_HIGHDMA;
3088
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003089 netdev->mem_start = bnad->mmio_start;
3090 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3091
3092 netdev->netdev_ops = &bnad_netdev_ops;
3093 bnad_set_ethtool_ops(netdev);
3094}
3095
3096/*
3097 * 1. Initialize the bnad structure
3098 * 2. Setup netdev pointer in pci_dev
 3099 * 3. Initialize Tx free tasklet
 3100 * 4. Initialize number of TxQs & CQs & MSI-X vectors
3101 */
3102static int
3103bnad_init(struct bnad *bnad,
3104 struct pci_dev *pdev, struct net_device *netdev)
3105{
3106 unsigned long flags;
3107
3108 SET_NETDEV_DEV(netdev, &pdev->dev);
3109 pci_set_drvdata(pdev, netdev);
3110
3111 bnad->netdev = netdev;
3112 bnad->pcidev = pdev;
3113 bnad->mmio_start = pci_resource_start(pdev, 0);
3114 bnad->mmio_len = pci_resource_len(pdev, 0);
3115 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3116 if (!bnad->bar0) {
3117 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3118 pci_set_drvdata(pdev, NULL);
3119 return -ENOMEM;
3120 }
3121 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3122 (unsigned long long) bnad->mmio_len);
3123
3124 spin_lock_irqsave(&bnad->bna_lock, flags);
3125 if (!bnad_msix_disable)
3126 bnad->cfg_flags = BNAD_CF_MSIX;
3127
3128 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3129
3130 bnad_q_num_init(bnad);
3131 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3132
3133 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3134 (bnad->num_rx * bnad->num_rxp_per_rx) +
3135 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003136
3137 bnad->txq_depth = BNAD_TXQ_DEPTH;
3138 bnad->rxq_depth = BNAD_RXQ_DEPTH;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003139
3140 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3141 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3142
3143 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3144 (unsigned long)bnad);
3145
3146 return 0;
3147}

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}

/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) spin lock used to protect the bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
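
/*
 * The two locks nest as in this illustrative sketch (the same pattern
 * bnad_pci_probe() and bnad_pci_remove() follow below):
 *
 *	mutex_lock(&bnad->conf_mutex);        <- serialize a config change
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	... drive the bna state machine ...
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	mutex_unlock(&bnad->conf_mutex);
 */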

/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}
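
/*
 * bnad_pci_init() follows the usual DMA-mask fallback: try 64-bit
 * streaming and coherent masks first and report using_dac = true on
 * success, otherwise settle for 32 bits. using_dac in turn gates
 * NETIF_F_HIGHDMA in bnad_netdev_init().
 */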

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac = false;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * the private area is retrieved via netdev_priv(netdev)
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "netdev allocation failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	bnad_lock_init(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 * Output : using_dac = true for 64-bit DMA
	 *		       = false for 32-bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);
3286
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003287 /* Get resource requirement form bna */
Rasesh Mody078086f2011-08-08 16:21:39 +00003288 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003289 bna_res_req(&bnad->res_info[0]);
Rasesh Mody078086f2011-08-08 16:21:39 +00003290 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003291
3292 /* Allocate resources from bna */
Rasesh Mody078086f2011-08-08 16:21:39 +00003293 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003294 if (err)
Rasesh Mody078086f2011-08-08 16:21:39 +00003295 goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
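
	/*
	 * Note: setup_timer() only binds handler and argument; a timer
	 * starts ticking when mod_timer() arms it with an absolute
	 * jiffies expiry, as done for iocpf_timer above. The other IOC
	 * timers are armed later from the IOC state machine.
	 */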

	/*
	 * Start the chip. If the enable callback comes back with an
	 * error we bail out: this is a catastrophic failure.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n", err);
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
				  bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with the net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_uninit:
	/* Re-acquire: conf_mutex was dropped before register_netdev() */
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}

static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
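
/*
 * bnad_pci_remove() above unwinds in roughly the reverse order of
 * bnad_pci_probe(): unregister_netdev() first so no new I/O arrives,
 * then IOC disable and timer teardown, bna_uninit(), resource and IRQ
 * release, and finally free_netdev().
 */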

static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	}, {0, }
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
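
/*
 * MODULE_DEVICE_TABLE() exports the id table above for module
 * autoloading; the PCI core matches the Brocade vendor/CT device ids
 * plus the Ethernet class/class_mask and calls bnad_pci_probe() for
 * each matching function.
 */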

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);