/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

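/*
 * Note: the unmap queue is a struct bnad_unmap_q header followed by a
 * variable-length array of struct bnad_skb_unmap entries; the macro
 * below sizes the KVA request for it. The (_depth) - 1 accounts for
 * the one array element the struct itself evidently declares.
 */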
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

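/*
 * Unmap the skb's head and all of its page fragments, starting at
 * unmap-array slot @index, and return the index following the last
 * entry consumed.
 */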
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			       skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		     struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int q;

	unmap_array = unmap_q->unmap_array;

	for (q = 0; q < unmap_q->q_depth; q++) {
		skb = unmap_array[q].skb;
		if (!skb)
			continue;

		unmap_cons = q;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 * c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16 wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet that was
	 * scheduled before bnad_cb_tx_cleanup() cleared the
	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
	 * the cleanup has run.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees the Tx buffers for all the tcb's in all the Tx's */
/*
 * Scheduled from the sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

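/*
 * Unmap and free every Rx buffer still posted on the RCB, then reset
 * the rcb and unmap-queue indices via bnad_reset_rcb().
 */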
static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

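/*
 * Allocate skbs for every free slot in the unmap queue, DMA-map them
 * into the RxQ entries, and ring the producer doorbell if posting is
 * currently allowed (BNAD_RXQ_POST_OK).
 */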
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

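/*
 * Consume up to @budget completions from the CQ: unmap each buffer,
 * apply the checksum/VLAN offload results, and hand the skb to the
 * stack (via GRO when hardware validated the checksum); then ack the
 * IB and trigger an RxQ refill.
 */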
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else {
			netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

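/*
 * Propagate the adapter's factory MAC address to the netdev; it also
 * becomes dev_addr if no address has been configured yet.
 */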
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

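/*
 * Stop all Tx subqueues and clear BNAD_TXQ_TX_STARTED; with the bit
 * cleared, bnad_free_txbufs() bails out early and no further doorbell
 * acks are issued for these TxQs.
 */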
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure, where we
	 * end up with a zero MAC address. We try to get the MAC
	 * address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

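/*
 * (Re)arm the Rx path: reinitialize the CQ completions, drop any stale
 * buffers, mark the RCBs started/postable, and post a fresh set of Rx
 * buffers.
 */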
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates the IRQ for the Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

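/*
 * MSIX vector layout: the mailbox vector(s) come first, followed by
 * one vector per TxQ across all Tx objects, then one vector per RxP;
 * bnad_txrx_irq_alloc() below computes offsets into this layout.
 */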
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

1627/* Utilities */
1628
1629static void
1630bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1631{
1632 int i = 1; /* Index 0 has broadcast address */
1633 struct netdev_hw_addr *mc_addr;
1634
1635 netdev_for_each_mc_addr(mc_addr, netdev) {
1636 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1637 ETH_ALEN);
1638 i++;
1639 }
1640}
1641
1642static int
1643bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1644{
1645 struct bnad_rx_ctrl *rx_ctrl =
1646 container_of(napi, struct bnad_rx_ctrl, napi);
Rasesh Mody2be67142011-08-30 15:27:39 +00001647 struct bnad *bnad = rx_ctrl->bnad;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001648 int rcvd = 0;
1649
Rasesh Mody271e8b72011-08-30 15:27:40 +00001650 rx_ctrl->rx_poll_ctr++;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001651
1652 if (!netif_carrier_ok(bnad->netdev))
1653 goto poll_exit;
1654
Rasesh Mody2be67142011-08-30 15:27:39 +00001655 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001656 if (rcvd >= budget)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001657 return rcvd;
1658
1659poll_exit:
Rasesh Mody19dbff92011-08-30 15:27:41 +00001660 napi_complete(napi);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001661
Rasesh Mody271e8b72011-08-30 15:27:40 +00001662 rx_ctrl->rx_complete++;
Rasesh Mody2be67142011-08-30 15:27:39 +00001663
1664 if (rx_ctrl->ccb)
Rasesh Mody271e8b72011-08-30 15:27:40 +00001665 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1666
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001667 return rcvd;
1668}
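/*
 * Sketch of the NAPI contract bnad_napi_poll_rx() follows (illustrative
 * only; the example_* names are hypothetical). A poll routine returns
 * the number of packets processed; returning a count >= budget tells
 * the core to keep polling, while anything less means the routine must
 * complete NAPI and re-enable the device interrupt itself.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = example_clean_rx(napi, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			example_irq_enable(napi);
 *		}
 *		return done;
 *	}
 */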
1669
Rasesh Mody2be67142011-08-30 15:27:39 +00001670#define BNAD_NAPI_POLL_QUOTA 64
1671static void
1672bnad_napi_init(struct bnad *bnad, u32 rx_id)
1673{
1674 struct bnad_rx_ctrl *rx_ctrl;
1675 int i;
1676
1677 /* Initialize & enable NAPI */
1678 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1679 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1680 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1681 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1682 }
1683}
1684
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001685static void
1686bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1687{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001688 struct bnad_rx_ctrl *rx_ctrl;
1689 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001690
1691 /* Initialize & enable NAPI */
1692 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1693 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
Rasesh Modybe7fa322010-12-23 21:45:01 +00001694
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001695 napi_enable(&rx_ctrl->napi);
1696 }
1697}
1698
1699static void
1700bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1701{
1702 int i;
1703
1704 /* First disable and then clean up */
1705 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1706 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1707 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1708 }
1709}
1710
1711/* Should be called with conf_lock held */
1712void
Rasesh Mody078086f2011-08-08 16:21:39 +00001713bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001714{
1715 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1716 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1717 unsigned long flags;
1718
1719 if (!tx_info->tx)
1720 return;
1721
1722 init_completion(&bnad->bnad_completions.tx_comp);
1723 spin_lock_irqsave(&bnad->bna_lock, flags);
1724 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1726 wait_for_completion(&bnad->bnad_completions.tx_comp);
1727
1728 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1729 bnad_tx_msix_unregister(bnad, tx_info,
1730 bnad->num_txq_per_tx);
1731
Rasesh Mody2be67142011-08-30 15:27:39 +00001732	if (tx_id == 0)
1733 tasklet_kill(&bnad->tx_free_tasklet);
1734
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001735 spin_lock_irqsave(&bnad->bna_lock, flags);
1736 bna_tx_destroy(tx_info->tx);
1737 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1738
1739 tx_info->tx = NULL;
Rasesh Mody078086f2011-08-08 16:21:39 +00001740 tx_info->tx_id = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001741
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001742 bnad_tx_res_free(bnad, res_info);
1743}
1744
1745/* Should be called with conf_lock held */
1746int
Rasesh Mody078086f2011-08-08 16:21:39 +00001747bnad_setup_tx(struct bnad *bnad, u32 tx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001748{
1749 int err;
1750 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1752 struct bna_intr_info *intr_info =
1753 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1754 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
stephen hemmingerd91d25d2011-09-16 11:09:51 +00001755 static const struct bna_tx_event_cbfn tx_cbfn = {
1756 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1757 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1758 .tx_stall_cbfn = bnad_cb_tx_stall,
1759 .tx_resume_cbfn = bnad_cb_tx_resume,
1760 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1761 };
1762
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001763 struct bna_tx *tx;
1764 unsigned long flags;
1765
Rasesh Mody078086f2011-08-08 16:21:39 +00001766 tx_info->tx_id = tx_id;
1767
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001768 /* Initialize the Tx object configuration */
1769 tx_config->num_txq = bnad->num_txq_per_tx;
1770 tx_config->txq_depth = bnad->txq_depth;
1771 tx_config->tx_type = BNA_TX_T_REGULAR;
Rasesh Mody078086f2011-08-08 16:21:39 +00001772 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001773
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001774 /* Get BNA's resource requirement for one tx object */
1775 spin_lock_irqsave(&bnad->bna_lock, flags);
1776 bna_tx_res_req(bnad->num_txq_per_tx,
1777 bnad->txq_depth, res_info);
1778 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1779
1780 /* Fill Unmap Q memory requirements */
1781 BNAD_FILL_UNMAPQ_MEM_REQ(
1782 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1783 bnad->num_txq_per_tx,
1784 BNAD_TX_UNMAPQ_DEPTH);
1785
1786 /* Allocate resources */
1787 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1788 if (err)
1789 return err;
1790
1791 /* Ask BNA to create one Tx object, supplying required resources */
1792 spin_lock_irqsave(&bnad->bna_lock, flags);
1793 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1794 tx_info);
1795 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1796 if (!tx)
1797 goto err_return;
1798 tx_info->tx = tx;
1799
1800 /* Register ISR for the Tx object */
1801 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1802 err = bnad_tx_msix_register(bnad, tx_info,
1803 tx_id, bnad->num_txq_per_tx);
1804 if (err)
1805 goto err_return;
1806 }
1807
1808 spin_lock_irqsave(&bnad->bna_lock, flags);
1809 bna_tx_enable(tx);
1810 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1811
1812 return 0;
1813
1814err_return:
1815 bnad_tx_res_free(bnad, res_info);
1816 return err;
1817}
1818
1819/* Set up the rx config for bna_rx_create */
1820/* bnad decides the configuration */
1821static void
1822bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1823{
1824 rx_config->rx_type = BNA_RX_T_REGULAR;
1825 rx_config->num_paths = bnad->num_rxp_per_rx;
Rasesh Mody078086f2011-08-08 16:21:39 +00001826 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001827
1828 if (bnad->num_rxp_per_rx > 1) {
1829 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1830 rx_config->rss_config.hash_type =
Rasesh Mody078086f2011-08-08 16:21:39 +00001831 (BFI_ENET_RSS_IPV6 |
1832 BFI_ENET_RSS_IPV6_TCP |
1833 BFI_ENET_RSS_IPV4 |
1834 BFI_ENET_RSS_IPV4_TCP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001835 rx_config->rss_config.hash_mask =
1836 bnad->num_rxp_per_rx - 1;
1837 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1838 sizeof(rx_config->rss_config.toeplitz_hash_key));
1839 } else {
1840 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1841 memset(&rx_config->rss_config, 0,
1842 sizeof(rx_config->rss_config));
1843 }
1844 rx_config->rxp_type = BNA_RXP_SLR;
1845 rx_config->q_depth = bnad->rxq_depth;
1846
1847 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1848
1849 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1850}
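/*
 * Illustrative note on the RSS fields above (a sketch of the usual
 * Toeplitz-style dispatch; toeplitz_hash() is hypothetical, not a
 * driver symbol). The hash computed over the flow tuple is masked down
 * to a path index, which is why hash_mask is num_rxp_per_rx - 1 and
 * implicitly assumes the path count is a power of two:
 *
 *	path = toeplitz_hash(tuple, key) & rss_config.hash_mask;
 */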
1851
Rasesh Mody2be67142011-08-30 15:27:39 +00001852static void
1853bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1854{
1855 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1856 int i;
1857
1858 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1859 rx_info->rx_ctrl[i].bnad = bnad;
1860}
1861
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001862/* Called with mutex_lock(&bnad->conf_mutex) held */
1863void
Rasesh Mody078086f2011-08-08 16:21:39 +00001864bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001865{
1866 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1867 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1868 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1869 unsigned long flags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00001870 int to_del = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001871
1872 if (!rx_info->rx)
1873 return;
1874
1875	if (rx_id == 0) {
1876 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001877 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1878 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001879 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001880 to_del = 1;
1881 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001882 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody271e8b72011-08-30 15:27:40 +00001883 if (to_del)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001884 del_timer_sync(&bnad->dim_timer);
1885 }
1886
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001887 init_completion(&bnad->bnad_completions.rx_comp);
1888 spin_lock_irqsave(&bnad->bna_lock, flags);
1889 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1890 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1891 wait_for_completion(&bnad->bnad_completions.rx_comp);
1892
1893 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1894 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1895
Rasesh Mody2be67142011-08-30 15:27:39 +00001896 bnad_napi_disable(bnad, rx_id);
1897
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001898 spin_lock_irqsave(&bnad->bna_lock, flags);
1899 bna_rx_destroy(rx_info->rx);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001900
1901 rx_info->rx = NULL;
Rasesh Mody3caa1e952011-08-30 15:27:42 +00001902 rx_info->rx_id = 0;
Rasesh Modyb9fa1fb2011-09-16 15:06:48 +00001903 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001904
1905 bnad_rx_res_free(bnad, res_info);
1906}
1907
1908/* Called with mutex_lock(&bnad->conf_mutex) held */
1909int
Rasesh Mody078086f2011-08-08 16:21:39 +00001910bnad_setup_rx(struct bnad *bnad, u32 rx_id)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001911{
1912 int err;
1913 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1914 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1915 struct bna_intr_info *intr_info =
1916 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1917 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
stephen hemmingerd91d25d2011-09-16 11:09:51 +00001918 static const struct bna_rx_event_cbfn rx_cbfn = {
1919 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1920 .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1921 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1922 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
Rasesh Mody5bcf6ac2011-09-27 10:39:10 +00001923 .rx_stall_cbfn = bnad_cb_rx_stall,
stephen hemmingerd91d25d2011-09-16 11:09:51 +00001924 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1925 .rx_post_cbfn = bnad_cb_rx_post,
1926 };
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001927 struct bna_rx *rx;
1928 unsigned long flags;
1929
Rasesh Mody078086f2011-08-08 16:21:39 +00001930 rx_info->rx_id = rx_id;
1931
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001932 /* Initialize the Rx object configuration */
1933 bnad_init_rx_config(bnad, rx_config);
1934
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001935 /* Get BNA's resource requirement for one Rx object */
1936 spin_lock_irqsave(&bnad->bna_lock, flags);
1937 bna_rx_res_req(rx_config, res_info);
1938 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939
1940 /* Fill Unmap Q memory requirements */
1941 BNAD_FILL_UNMAPQ_MEM_REQ(
1942 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1943 rx_config->num_paths +
1944 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1945 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1946
1947 /* Allocate resource */
1948 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1949 if (err)
1950 return err;
1951
Rasesh Mody2be67142011-08-30 15:27:39 +00001952 bnad_rx_ctrl_init(bnad, rx_id);
1953
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001954 /* Ask BNA to create one Rx object, supplying required resources */
1955 spin_lock_irqsave(&bnad->bna_lock, flags);
1956 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1957 rx_info);
Rasesh Mody3caa1e952011-08-30 15:27:42 +00001958 if (!rx) {
1959 err = -ENOMEM;
Rasesh Modyb9fa1fb2011-09-16 15:06:48 +00001960 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001961 goto err_return;
Rasesh Mody3caa1e952011-08-30 15:27:42 +00001962 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001963 rx_info->rx = rx;
Rasesh Modyb9fa1fb2011-09-16 15:06:48 +00001964 spin_unlock_irqrestore(&bnad->bna_lock, flags);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001965
Rasesh Mody2be67142011-08-30 15:27:39 +00001966 /*
1967	 * Init NAPI, which leaves its state set to NAPI_STATE_SCHED
1968	 * so that the IRQ handler cannot schedule NAPI at this point.
1969 */
1970 bnad_napi_init(bnad, rx_id);
1971
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001972 /* Register ISR for the Rx object */
1973 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1974 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1975 rx_config->num_paths);
1976 if (err)
1977 goto err_return;
1978 }
1979
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001980 spin_lock_irqsave(&bnad->bna_lock, flags);
1981	if (rx_id == 0) {
1982 /* Set up Dynamic Interrupt Moderation Vector */
1983 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1984 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1985
1986 /* Enable VLAN filtering only on the default Rx */
1987 bna_rx_vlanfilter_enable(rx);
1988
1989 /* Start the DIM timer */
1990 bnad_dim_timer_start(bnad);
1991 }
1992
1993 bna_rx_enable(rx);
1994 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1995
Rasesh Mody2be67142011-08-30 15:27:39 +00001996 /* Enable scheduling of NAPI */
1997 bnad_napi_enable(bnad, rx_id);
1998
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001999 return 0;
2000
2001err_return:
2002 bnad_cleanup_rx(bnad, rx_id);
2003 return err;
2004}
2005
2006/* Called with conf_lock & bnad->bna_lock held */
2007void
2008bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2009{
2010 struct bnad_tx_info *tx_info;
2011
2012 tx_info = &bnad->tx_info[0];
2013 if (!tx_info->tx)
2014 return;
2015
2016 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2017}
2018
2019/* Called with conf_lock & bnad->bna_lock held */
2020void
2021bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2022{
2023 struct bnad_rx_info *rx_info;
Rasesh Mody0120b992011-07-22 08:07:41 +00002024 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002025
2026 for (i = 0; i < bnad->num_rx; i++) {
2027 rx_info = &bnad->rx_info[i];
2028 if (!rx_info->rx)
2029 continue;
2030 bna_rx_coalescing_timeo_set(rx_info->rx,
2031 bnad->rx_coalescing_timeo);
2032 }
2033}
2034
2035/*
2036 * Called with bnad->bna_lock held
2037 */
Rasesh Modya2122d92011-08-30 15:27:43 +00002038int
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002039bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2040{
2041 int ret;
2042
2043 if (!is_valid_ether_addr(mac_addr))
2044 return -EADDRNOTAVAIL;
2045
2046 /* If datapath is down, pretend everything went through */
2047 if (!bnad->rx_info[0].rx)
2048 return 0;
2049
2050 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2051 if (ret != BNA_CB_SUCCESS)
2052 return -EADDRNOTAVAIL;
2053
2054 return 0;
2055}
2056
2057/* Should be called with conf_lock held */
Rasesh Modya2122d92011-08-30 15:27:43 +00002058int
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002059bnad_enable_default_bcast(struct bnad *bnad)
2060{
2061 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2062 int ret;
2063 unsigned long flags;
2064
2065 init_completion(&bnad->bnad_completions.mcast_comp);
2066
2067 spin_lock_irqsave(&bnad->bna_lock, flags);
2068 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2069 bnad_cb_rx_mcast_add);
2070 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2071
2072 if (ret == BNA_CB_SUCCESS)
2073 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2074 else
2075 return -ENODEV;
2076
2077 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2078 return -ENODEV;
2079
2080 return 0;
2081}
2082
Rasesh Mody19dbff92011-08-30 15:27:41 +00002083/* Called with mutex_lock(&bnad->conf_mutex) held */
Rasesh Modya2122d92011-08-30 15:27:43 +00002084void
Rasesh Modyaad75b62010-12-23 21:45:08 +00002085bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2086{
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002087 u16 vid;
Rasesh Modyaad75b62010-12-23 21:45:08 +00002088 unsigned long flags;
2089
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002090 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
Rasesh Modyaad75b62010-12-23 21:45:08 +00002091 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002092 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
Rasesh Modyaad75b62010-12-23 21:45:08 +00002093 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2094 }
2095}
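/*
 * Sketch of the bookkeeping behind the replay above (illustrative).
 * The ndo add/kill handlers further down set and clear bits in
 * bnad->active_vlans, so after an Rx teardown the hardware filter can
 * be rebuilt with a single bitmap walk:
 *
 *	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID)
 *		bna_rx_vlan_add(rx, vid);
 */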
2096
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002097/* Statistics utilities */
2098void
Eric Dumazet250e0612010-09-02 12:45:02 -07002099bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002100{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002101 int i, j;
2102
2103 for (i = 0; i < bnad->num_rx; i++) {
2104 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2105 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002106 stats->rx_packets += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002107 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002108 stats->rx_bytes += bnad->rx_info[i].
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002109 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2110 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2111 bnad->rx_info[i].rx_ctrl[j].ccb->
2112 rcb[1]->rxq) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002113 stats->rx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002114 bnad->rx_info[i].rx_ctrl[j].
2115 ccb->rcb[1]->rxq->rx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002116 stats->rx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002117 bnad->rx_info[i].rx_ctrl[j].
2118 ccb->rcb[1]->rxq->rx_bytes;
2119 }
2120 }
2121 }
2122 }
2123 for (i = 0; i < bnad->num_tx; i++) {
2124 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2125 if (bnad->tx_info[i].tcb[j]) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002126 stats->tx_packets +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002127 bnad->tx_info[i].tcb[j]->txq->tx_packets;
Eric Dumazet250e0612010-09-02 12:45:02 -07002128 stats->tx_bytes +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002129 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2130 }
2131 }
2132 }
2133}
2134
2135/*
2136 * Must be called with the bna_lock held.
2137 */
2138void
Eric Dumazet250e0612010-09-02 12:45:02 -07002139bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002140{
Rasesh Mody078086f2011-08-08 16:21:39 +00002141 struct bfi_enet_stats_mac *mac_stats;
2142 u32 bmap;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002143 int i;
2144
Rasesh Mody078086f2011-08-08 16:21:39 +00002145 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
Eric Dumazet250e0612010-09-02 12:45:02 -07002146 stats->rx_errors =
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002147 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2148 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2149 mac_stats->rx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002150 stats->tx_errors = mac_stats->tx_fcs_error +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002151 mac_stats->tx_undersize;
Eric Dumazet250e0612010-09-02 12:45:02 -07002152 stats->rx_dropped = mac_stats->rx_drop;
2153 stats->tx_dropped = mac_stats->tx_drop;
2154 stats->multicast = mac_stats->rx_multicast;
2155 stats->collisions = mac_stats->tx_total_collision;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002156
Eric Dumazet250e0612010-09-02 12:45:02 -07002157 stats->rx_length_errors = mac_stats->rx_frame_length_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002158
2159 /* receive ring buffer overflow ?? */
2160
Eric Dumazet250e0612010-09-02 12:45:02 -07002161 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2162 stats->rx_frame_errors = mac_stats->rx_alignment_error;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002163	/* receiver FIFO overrun */
Rasesh Mody078086f2011-08-08 16:21:39 +00002164 bmap = bna_rx_rid_mask(&bnad->bna);
2165 for (i = 0; bmap; i++) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002166 if (bmap & 1) {
Eric Dumazet250e0612010-09-02 12:45:02 -07002167 stats->rx_fifo_errors +=
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002168 bnad->stats.bna_stats->
Rasesh Mody078086f2011-08-08 16:21:39 +00002169 hw_stats.rxf_stats[i].frame_drops;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002170 break;
2171 }
2172 bmap >>= 1;
2173 }
2174}
2175
2176static void
2177bnad_mbox_irq_sync(struct bnad *bnad)
2178{
2179 u32 irq;
2180 unsigned long flags;
2181
2182 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 if (bnad->cfg_flags & BNAD_CF_MSIX)
Rasesh Mody8811e262011-07-22 08:07:44 +00002184 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002185 else
2186 irq = bnad->pcidev->irq;
2187 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188
2189 synchronize_irq(irq);
2190}
2191
2192/* Utility used by bnad_start_xmit, for doing TSO */
2193static int
2194bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2195{
2196 int err;
2197
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002198 if (skb_header_cloned(skb)) {
2199 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2200 if (err) {
2201 BNAD_UPDATE_CTR(bnad, tso_err);
2202 return err;
2203 }
2204 }
2205
2206 /*
2207 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2208 * excluding the length field.
2209 */
2210 if (skb->protocol == htons(ETH_P_IP)) {
2211 struct iphdr *iph = ip_hdr(skb);
2212
2213		/* Do we really need to zero these fields? */
2214 iph->tot_len = 0;
2215 iph->check = 0;
2216
2217 tcp_hdr(skb)->check =
2218 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2219 IPPROTO_TCP, 0);
2220 BNAD_UPDATE_CTR(bnad, tso4);
2221 } else {
2222 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2223
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002224 ipv6h->payload_len = 0;
2225 tcp_hdr(skb)->check =
2226 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2227 IPPROTO_TCP, 0);
2228 BNAD_UPDATE_CTR(bnad, tso6);
2229 }
2230
2231 return 0;
2232}
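/*
 * Worked note on the seeding above (background, not extra driver
 * logic). The hardware inserts the per-segment TCP length when it
 * finalizes the checksum, so software seeds tcp->check with the
 * pseudo-header sum over source address, destination address and
 * protocol only, passing 0 for the length:
 *
 *	tcp_hdr(skb)->check =
 *		~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
 */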
2233
2234/*
2235 * Initialize Q numbers depending on Rx Paths
2236 * Called with bnad->bna_lock held, because of cfg_flags
2237 * access.
2238 */
2239static void
2240bnad_q_num_init(struct bnad *bnad)
2241{
2242 int rxps;
2243
2244 rxps = min((uint)num_online_cpus(),
Rasesh Mody772b5232011-08-30 15:27:37 +00002245 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002246
2247 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2248 rxps = 1; /* INTx */
2249
2250 bnad->num_rx = 1;
2251 bnad->num_tx = 1;
2252 bnad->num_rxp_per_rx = rxps;
2253 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2254}
2255
2256/*
2257 * Adjusts the Q numbers, given a number of MSI-X vectors.
2258 * Gives preference to RSS over Tx priority queues; in that
2259 * case just 1 Tx Q is used.
2260 * Called with bnad->bna_lock held because of cfg_flags access
2261 */
2262static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002263bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002264{
2265 bnad->num_txq_per_tx = 1;
2266 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2267 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2268 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2269 bnad->num_rxp_per_rx = msix_vectors -
2270 (bnad->num_tx * bnad->num_txq_per_tx) -
2271 BNAD_MAILBOX_MSIX_VECTORS;
2272 } else
2273 bnad->num_rxp_per_rx = 1;
2274}
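/*
 * Worked example for the adjustment above (illustrative; assumes
 * BNAD_MAILBOX_MSIX_VECTORS == 1). With msix_vectors == 8, one Tx
 * object carrying one TxQ and MSI-X enabled, num_rxp_per_rx becomes
 * 8 - (1 * 1) - 1 = 6; if the vector budget is too small, the driver
 * falls back to a single Rx path instead.
 */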
2275
Rasesh Mody078086f2011-08-08 16:21:39 +00002276/* Enable / disable ioceth */
2277static int
2278bnad_ioceth_disable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002279{
2280 unsigned long flags;
Rasesh Mody078086f2011-08-08 16:21:39 +00002281 int err = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002282
2283 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002284 init_completion(&bnad->bnad_completions.ioc_comp);
2285 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002286 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2287
Rasesh Mody078086f2011-08-08 16:21:39 +00002288 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2289 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2290
2291 err = bnad->bnad_completions.ioc_comp_status;
2292 return err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002293}
2294
2295static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002296bnad_ioceth_enable(struct bnad *bnad)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002297{
2298 int err = 0;
2299 unsigned long flags;
2300
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002301 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002302 init_completion(&bnad->bnad_completions.ioc_comp);
2303 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2304 bna_ioceth_enable(&bnad->bna.ioceth);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002305 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2306
Rasesh Mody078086f2011-08-08 16:21:39 +00002307 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2308 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002309
Rasesh Mody078086f2011-08-08 16:21:39 +00002310 err = bnad->bnad_completions.ioc_comp_status;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002311
2312 return err;
2313}
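/*
 * Sketch of the completion handshake used by the enable/disable pair
 * above (illustrative). The completion is armed under the lock before
 * the state machine is kicked; the IOC event callbacks later fill in
 * ioc_comp_status and call complete() to wake this thread:
 *
 *	init_completion(&done);			(arm first)
 *	kick_state_machine();			(completes asynchronously)
 *	wait_for_completion_timeout(&done, timeout);
 */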
2314
2315/* Free BNA resources */
2316static void
Rasesh Mody078086f2011-08-08 16:21:39 +00002317bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2318 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002319{
2320 int i;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002321
Rasesh Mody078086f2011-08-08 16:21:39 +00002322 for (i = 0; i < res_val_max; i++)
2323 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002324}
2325
2326/* Allocates memory and interrupt resources for BNA */
2327static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002328bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2329 u32 res_val_max)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002330{
2331 int i, err;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002332
Rasesh Mody078086f2011-08-08 16:21:39 +00002333 for (i = 0; i < res_val_max; i++) {
2334 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002335 if (err)
2336 goto err_return;
2337 }
2338 return 0;
2339
2340err_return:
Rasesh Mody078086f2011-08-08 16:21:39 +00002341 bnad_res_free(bnad, res_info, res_val_max);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002342 return err;
2343}
2344
2345/* Interrupt enable / disable */
2346static void
2347bnad_enable_msix(struct bnad *bnad)
2348{
2349 int i, ret;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002350 unsigned long flags;
2351
2352 spin_lock_irqsave(&bnad->bna_lock, flags);
2353 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2354 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2355 return;
2356 }
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358
2359 if (bnad->msix_table)
2360 return;
2361
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002362 bnad->msix_table =
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002363 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002364
2365 if (!bnad->msix_table)
2366 goto intx_mode;
2367
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002368 for (i = 0; i < bnad->msix_num; i++)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002369 bnad->msix_table[i].entry = i;
2370
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002371 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002372 if (ret > 0) {
2373 /* Not enough MSI-X vectors. */
Rasesh Mody19dbff92011-08-30 15:27:41 +00002374 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2375 ret, bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002376
2377 spin_lock_irqsave(&bnad->bna_lock, flags);
2378 /* ret = #of vectors that we got */
Rasesh Mody271e8b72011-08-30 15:27:40 +00002379 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2380 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2382
Rasesh Mody271e8b72011-08-30 15:27:40 +00002383 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002384 BNAD_MAILBOX_MSIX_VECTORS;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002385
Rasesh Mody078086f2011-08-08 16:21:39 +00002386 if (bnad->msix_num > ret)
2387 goto intx_mode;
2388
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002389 /* Try once more with adjusted numbers */
2390 /* If this fails, fall back to INTx */
2391 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
Rasesh Modyb7ee31c52010-10-05 15:46:05 +00002392 bnad->msix_num);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002393 if (ret)
2394 goto intx_mode;
2395
2396 } else if (ret < 0)
2397 goto intx_mode;
Rasesh Mody078086f2011-08-08 16:21:39 +00002398
2399 pci_intx(bnad->pcidev, 0);
2400
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002401 return;
2402
2403intx_mode:
Rasesh Mody19dbff92011-08-30 15:27:41 +00002404 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002405
2406 kfree(bnad->msix_table);
2407 bnad->msix_table = NULL;
2408 bnad->msix_num = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002409 spin_lock_irqsave(&bnad->bna_lock, flags);
2410 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2411 bnad_q_num_init(bnad);
2412 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2413}
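/*
 * Note on the retry above, per the pci_enable_msix() contract of this
 * era: 0 means success, a negative value is an error, and a positive
 * value is how many vectors the platform could actually provide. The
 * driver shrinks its request toward that count and retries once before
 * falling back to INTx:
 *
 *	ret = pci_enable_msix(pdev, table, want);
 *	if (ret > 0)
 *		want = ret;	(then request no more than 'want')
 */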
2414
2415static void
2416bnad_disable_msix(struct bnad *bnad)
2417{
2418 u32 cfg_flags;
2419 unsigned long flags;
2420
2421 spin_lock_irqsave(&bnad->bna_lock, flags);
2422 cfg_flags = bnad->cfg_flags;
2423 if (bnad->cfg_flags & BNAD_CF_MSIX)
2424 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2425 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2426
2427 if (cfg_flags & BNAD_CF_MSIX) {
2428 pci_disable_msix(bnad->pcidev);
2429 kfree(bnad->msix_table);
2430 bnad->msix_table = NULL;
2431 }
2432}
2433
2434/* Netdev entry points */
2435static int
2436bnad_open(struct net_device *netdev)
2437{
2438 int err;
2439 struct bnad *bnad = netdev_priv(netdev);
2440 struct bna_pause_config pause_config;
2441 int mtu;
2442 unsigned long flags;
2443
2444 mutex_lock(&bnad->conf_mutex);
2445
2446 /* Tx */
2447 err = bnad_setup_tx(bnad, 0);
2448 if (err)
2449 goto err_return;
2450
2451 /* Rx */
2452 err = bnad_setup_rx(bnad, 0);
2453 if (err)
2454 goto cleanup_tx;
2455
2456 /* Port */
2457 pause_config.tx_pause = 0;
2458 pause_config.rx_pause = 0;
2459
Rasesh Mody078086f2011-08-08 16:21:39 +00002460 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002461
2462 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002463 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2464 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2465 bna_enet_enable(&bnad->bna.enet);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002466 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2467
2468 /* Enable broadcast */
2469 bnad_enable_default_bcast(bnad);
2470
Rasesh Modyaad75b62010-12-23 21:45:08 +00002471 /* Restore VLANs, if any */
2472 bnad_restore_vlans(bnad, 0);
2473
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002474 /* Set the UCAST address */
2475 spin_lock_irqsave(&bnad->bna_lock, flags);
2476 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2477 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2478
2479 /* Start the stats timer */
2480 bnad_stats_timer_start(bnad);
2481
2482 mutex_unlock(&bnad->conf_mutex);
2483
2484 return 0;
2485
2486cleanup_tx:
2487 bnad_cleanup_tx(bnad, 0);
2488
2489err_return:
2490 mutex_unlock(&bnad->conf_mutex);
2491 return err;
2492}
2493
2494static int
2495bnad_stop(struct net_device *netdev)
2496{
2497 struct bnad *bnad = netdev_priv(netdev);
2498 unsigned long flags;
2499
2500 mutex_lock(&bnad->conf_mutex);
2501
2502 /* Stop the stats timer */
2503 bnad_stats_timer_stop(bnad);
2504
Rasesh Mody078086f2011-08-08 16:21:39 +00002505 init_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002506
2507 spin_lock_irqsave(&bnad->bna_lock, flags);
Rasesh Mody078086f2011-08-08 16:21:39 +00002508 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2509 bnad_cb_enet_disabled);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002510 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2511
Rasesh Mody078086f2011-08-08 16:21:39 +00002512 wait_for_completion(&bnad->bnad_completions.enet_comp);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002513
2514 bnad_cleanup_tx(bnad, 0);
2515 bnad_cleanup_rx(bnad, 0);
2516
2517 /* Synchronize mailbox IRQ */
2518 bnad_mbox_irq_sync(bnad);
2519
2520 mutex_unlock(&bnad->conf_mutex);
2521
2522 return 0;
2523}
2524
2525/* TX */
2526/*
2527 * bnad_start_xmit : Netdev entry point for Transmit
2528 * Called with the tx lock held by the networking stack
2529 */
2530static netdev_tx_t
2531bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2532{
2533 struct bnad *bnad = netdev_priv(netdev);
Rasesh Mody078086f2011-08-08 16:21:39 +00002534 u32 txq_id = 0;
2535 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002536
Rasesh Mody0120b992011-07-22 08:07:41 +00002537 u16 txq_prod, vlan_tag = 0;
2538 u32 unmap_prod, wis, wis_used, wi_range;
2539 u32 vectors, vect_id, i, acked;
Rasesh Mody0120b992011-07-22 08:07:41 +00002540 int err;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002541 unsigned int len;
2542 u32 gso_size;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002543
Rasesh Mody078086f2011-08-08 16:21:39 +00002544 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
Rasesh Mody0120b992011-07-22 08:07:41 +00002545 dma_addr_t dma_addr;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002546 struct bna_txq_entry *txqent;
Rasesh Mody078086f2011-08-08 16:21:39 +00002547 u16 flags;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002548
Rasesh Mody271e8b72011-08-30 15:27:40 +00002549 if (unlikely(skb->len <= ETH_HLEN)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002550 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002551 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2552 return NETDEV_TX_OK;
2553 }
2554 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2555 dev_kfree_skb(skb);
2556 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2557 return NETDEV_TX_OK;
2558 }
2559 if (unlikely(skb_headlen(skb) == 0)) {
2560 dev_kfree_skb(skb);
2561 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002562 return NETDEV_TX_OK;
2563 }
2564
Rasesh Modybe7fa322010-12-23 21:45:01 +00002565 /*
2566 * Takes care of the Tx that is scheduled between clearing the flag
Rasesh Mody19dbff92011-08-30 15:27:41 +00002567 * and the netif_tx_stop_all_queues() call.
Rasesh Modybe7fa322010-12-23 21:45:01 +00002568 */
2569 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2570 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002571 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002572 return NETDEV_TX_OK;
2573 }
2574
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002575 vectors = 1 + skb_shinfo(skb)->nr_frags;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002576 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002577 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002578 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002579 return NETDEV_TX_OK;
2580 }
2581 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2582 acked = 0;
Rasesh Mody078086f2011-08-08 16:21:39 +00002583 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2584 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002585 if ((u16) (*tcb->hw_consumer_index) !=
2586 tcb->consumer_index &&
2587 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2588 acked = bnad_free_txbufs(bnad, tcb);
Rasesh Modybe7fa322010-12-23 21:45:01 +00002589 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2590 bna_ib_ack(tcb->i_dbell, acked);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002591 smp_mb__before_clear_bit();
2592 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2593 } else {
2594 netif_stop_queue(netdev);
2595 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2596 }
2597
2598 smp_mb();
2599 /*
2600	 * Check again to deal with the race between the
2601	 * netif_stop_queue here and the netif_wake_queue in the
2602	 * interrupt handler, which does not run under the netif tx lock.
2603 */
2604 if (likely
2605 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2606 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2607 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2608 return NETDEV_TX_BUSY;
2609 } else {
2610 netif_wake_queue(netdev);
2611 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2612 }
2613 }
2614
2615 unmap_prod = unmap_q->producer_index;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002616 flags = 0;
2617
2618 txq_prod = tcb->producer_index;
2619 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002620 txqent->hdr.wi.reserved = 0;
2621 txqent->hdr.wi.num_vectors = vectors;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002622
Jesse Grosseab6d182010-10-20 13:56:03 +00002623 if (vlan_tx_tag_present(skb)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002624 vlan_tag = (u16) vlan_tx_tag_get(skb);
2625 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2626 }
2627 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2628 vlan_tag =
2629 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2630 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2631 }
2632
2633 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2634
2635 if (skb_is_gso(skb)) {
Rasesh Mody271e8b72011-08-30 15:27:40 +00002636 gso_size = skb_shinfo(skb)->gso_size;
2637
2638 if (unlikely(gso_size > netdev->mtu)) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002639 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002640 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002641 return NETDEV_TX_OK;
2642 }
Rasesh Mody271e8b72011-08-30 15:27:40 +00002643 if (unlikely((gso_size + skb_transport_offset(skb) +
2644 tcp_hdrlen(skb)) >= skb->len)) {
2645 txqent->hdr.wi.opcode =
2646 __constant_htons(BNA_TXQ_WI_SEND);
2647 txqent->hdr.wi.lso_mss = 0;
2648 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2649 } else {
2650 txqent->hdr.wi.opcode =
2651 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2652 txqent->hdr.wi.lso_mss = htons(gso_size);
2653 }
2654
2655 err = bnad_tso_prepare(bnad, skb);
2656 if (unlikely(err)) {
2657 dev_kfree_skb(skb);
2658 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2659 return NETDEV_TX_OK;
2660 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002661 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2662 txqent->hdr.wi.l4_hdr_size_n_offset =
2663 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2664 (tcp_hdrlen(skb) >> 2,
2665 skb_transport_offset(skb)));
Rasesh Mody271e8b72011-08-30 15:27:40 +00002666 } else {
2667 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002668 txqent->hdr.wi.lso_mss = 0;
2669
Rasesh Mody271e8b72011-08-30 15:27:40 +00002670 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2671 dev_kfree_skb(skb);
2672 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2673 return NETDEV_TX_OK;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002674 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002675
Rasesh Mody271e8b72011-08-30 15:27:40 +00002676 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2677 u8 proto = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002678
Rasesh Mody271e8b72011-08-30 15:27:40 +00002679 if (skb->protocol == __constant_htons(ETH_P_IP))
2680 proto = ip_hdr(skb)->protocol;
2681 else if (skb->protocol ==
2682 __constant_htons(ETH_P_IPV6)) {
2683				/* nexthdr may not be TCP; IPv6 extension headers may come first. */
2684 proto = ipv6_hdr(skb)->nexthdr;
2685 }
2686 if (proto == IPPROTO_TCP) {
2687 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2688 txqent->hdr.wi.l4_hdr_size_n_offset =
2689 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2690 (0, skb_transport_offset(skb)));
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002691
Rasesh Mody271e8b72011-08-30 15:27:40 +00002692 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002693
Rasesh Mody271e8b72011-08-30 15:27:40 +00002694 if (unlikely(skb_headlen(skb) <
2695 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2696 dev_kfree_skb(skb);
2697 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2698 return NETDEV_TX_OK;
2699 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002700
Rasesh Mody271e8b72011-08-30 15:27:40 +00002701 } else if (proto == IPPROTO_UDP) {
2702 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2703 txqent->hdr.wi.l4_hdr_size_n_offset =
2704 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2705 (0, skb_transport_offset(skb)));
2706
2707 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2708 if (unlikely(skb_headlen(skb) <
2709 skb_transport_offset(skb) +
2710 sizeof(struct udphdr))) {
2711 dev_kfree_skb(skb);
2712 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2713 return NETDEV_TX_OK;
2714 }
2715 } else {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002716 dev_kfree_skb(skb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002717 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002718 return NETDEV_TX_OK;
2719 }
Rasesh Mody271e8b72011-08-30 15:27:40 +00002720 } else {
2721 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002722 }
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002723 }
2724
2725 txqent->hdr.wi.flags = htons(flags);
2726
2727 txqent->hdr.wi.frame_length = htonl(skb->len);
2728
2729 unmap_q->unmap_array[unmap_prod].skb = skb;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002730 len = skb_headlen(skb);
2731 txqent->vector[0].length = htons(len);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002732 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2733 skb_headlen(skb), DMA_TO_DEVICE);
2734 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002735 dma_addr);
2736
Rasesh Mody271e8b72011-08-30 15:27:40 +00002737 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002738 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2739
Rasesh Mody271e8b72011-08-30 15:27:40 +00002740 vect_id = 0;
2741 wis_used = 1;
2742
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002743 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2744 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
Rasesh Mody078086f2011-08-08 16:21:39 +00002745 u16 size = frag->size;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002746
Rasesh Mody271e8b72011-08-30 15:27:40 +00002747 if (unlikely(size == 0)) {
2748 unmap_prod = unmap_q->producer_index;
2749
2750 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2751 unmap_q->unmap_array,
2752 unmap_prod, unmap_q->q_depth, skb,
2753 i);
2754 dev_kfree_skb(skb);
2755 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2756 return NETDEV_TX_OK;
2757 }
2758
2759 len += size;
2760
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002761 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2762 vect_id = 0;
2763 if (--wi_range)
2764 txqent++;
2765 else {
2766 BNA_QE_INDX_ADD(txq_prod, wis_used,
2767 tcb->q_depth);
2768 wis_used = 0;
2769 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2770 txqent, wi_range);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002771 }
2772 wis_used++;
Rasesh Mody271e8b72011-08-30 15:27:40 +00002773 txqent->hdr.wi_ext.opcode =
2774 __constant_htons(BNA_TXQ_WI_EXTENSION);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002775 }
2776
2777 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2778 txqent->vector[vect_id].length = htons(size);
Ian Campbell4d5b1a62011-08-29 23:18:24 +00002779 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2780 0, size, DMA_TO_DEVICE);
Ivan Vecera5ea74312011-02-02 04:37:02 +00002781 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002782 dma_addr);
2783 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2784 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2785 }
2786
Rasesh Mody271e8b72011-08-30 15:27:40 +00002787 if (unlikely(len != skb->len)) {
2788 unmap_prod = unmap_q->producer_index;
2789
2790 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2791 unmap_q->unmap_array, unmap_prod,
2792 unmap_q->q_depth, skb,
2793 skb_shinfo(skb)->nr_frags);
2794 dev_kfree_skb(skb);
2795 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2796 return NETDEV_TX_OK;
2797 }
2798
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002799 unmap_q->producer_index = unmap_prod;
2800 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2801 tcb->producer_index = txq_prod;
2802
2803 smp_mb();
Rasesh Modybe7fa322010-12-23 21:45:01 +00002804
2805 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2806 return NETDEV_TX_OK;
2807
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002808 bna_txq_prod_indx_doorbell(tcb);
Rasesh Mody271e8b72011-08-30 15:27:40 +00002809 smp_mb();
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002810
2811 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2812 tasklet_schedule(&bnad->tx_free_tasklet);
2813
2814 return NETDEV_TX_OK;
2815}
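/*
 * Worked example of the work-item accounting in bnad_start_xmit()
 * (illustrative). Each work item carries up to
 * BFI_TX_MAX_VECTORS_PER_WI (4) vectors, so an skb with a linear area
 * plus six fragments has vectors == 7 and needs BNA_TXQ_WI_NEEDED(7)
 * == 2 work items; the second is stamped BNA_TXQ_WI_EXTENSION in the
 * fragment loop above.
 */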
2816
2817/*
2818 * Uses the spin lock to synchronize reading of the stats structures,
2819 * which are written by BNA under the same lock.
2820 */
Eric Dumazet250e0612010-09-02 12:45:02 -07002821static struct rtnl_link_stats64 *
2822bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002823{
2824 struct bnad *bnad = netdev_priv(netdev);
2825 unsigned long flags;
2826
2827 spin_lock_irqsave(&bnad->bna_lock, flags);
2828
Eric Dumazet250e0612010-09-02 12:45:02 -07002829 bnad_netdev_qstats_fill(bnad, stats);
2830 bnad_netdev_hwstats_fill(bnad, stats);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002831
2832 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2833
Eric Dumazet250e0612010-09-02 12:45:02 -07002834 return stats;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002835}
2836
Rasesh Modya2122d92011-08-30 15:27:43 +00002837void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002838bnad_set_rx_mode(struct net_device *netdev)
2839{
2840 struct bnad *bnad = netdev_priv(netdev);
2841 u32 new_mask, valid_mask;
2842 unsigned long flags;
2843
2844 spin_lock_irqsave(&bnad->bna_lock, flags);
2845
2846 new_mask = valid_mask = 0;
2847
2848 if (netdev->flags & IFF_PROMISC) {
2849 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2850 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2851 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2852 bnad->cfg_flags |= BNAD_CF_PROMISC;
2853 }
2854 } else {
2855 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2856 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2857 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2858 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2859 }
2860 }
2861
2862 if (netdev->flags & IFF_ALLMULTI) {
2863 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2864 new_mask |= BNA_RXMODE_ALLMULTI;
2865 valid_mask |= BNA_RXMODE_ALLMULTI;
2866 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2867 }
2868 } else {
2869 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2870 new_mask &= ~BNA_RXMODE_ALLMULTI;
2871 valid_mask |= BNA_RXMODE_ALLMULTI;
2872 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2873 }
2874 }
2875
Rasesh Mody271e8b72011-08-30 15:27:40 +00002876 if (bnad->rx_info[0].rx == NULL)
2877 goto unlock;
2878
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002879 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2880
2881 if (!netdev_mc_empty(netdev)) {
2882 u8 *mcaddr_list;
2883 int mc_count = netdev_mc_count(netdev);
2884
2885 /* Index 0 holds the broadcast address */
2886 mcaddr_list =
2887 kzalloc((mc_count + 1) * ETH_ALEN,
2888 GFP_ATOMIC);
2889 if (!mcaddr_list)
Jiri Slabyca1cef32010-09-04 02:08:41 +00002890 goto unlock;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002891
2892 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2893
2894 /* Copy rest of the MC addresses */
2895 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2896
2897 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2898 mcaddr_list, NULL);
2899
2900 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2901 kfree(mcaddr_list);
2902 }
Jiri Slabyca1cef32010-09-04 02:08:41 +00002903unlock:
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002904 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2905}
2906
2907/*
2908 * bna_lock is used to sync writes to netdev->addr
2909 * conf_lock cannot be used since this call may be made
2910 * in a non-blocking context.
2911 */
2912static int
2913bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2914{
2915 int err;
2916 struct bnad *bnad = netdev_priv(netdev);
2917 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2918 unsigned long flags;
2919
2920 spin_lock_irqsave(&bnad->bna_lock, flags);
2921
2922 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2923
2924 if (!err)
2925 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2926
2927 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2928
2929 return err;
2930}
2931
2932static int
Rasesh Mody078086f2011-08-08 16:21:39 +00002933bnad_mtu_set(struct bnad *bnad, int mtu)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002934{
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002935 unsigned long flags;
2936
Rasesh Mody078086f2011-08-08 16:21:39 +00002937 init_completion(&bnad->bnad_completions.mtu_comp);
2938
2939 spin_lock_irqsave(&bnad->bna_lock, flags);
2940 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2941 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2942
2943 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2944
2945 return bnad->bnad_completions.mtu_comp_status;
2946}
2947
2948static int
2949bnad_change_mtu(struct net_device *netdev, int new_mtu)
2950{
2951 int err, mtu = netdev->mtu;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002952 struct bnad *bnad = netdev_priv(netdev);
2953
2954 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2955 return -EINVAL;
2956
2957 mutex_lock(&bnad->conf_mutex);
2958
2959 netdev->mtu = new_mtu;
2960
Rasesh Mody078086f2011-08-08 16:21:39 +00002961 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2962 err = bnad_mtu_set(bnad, mtu);
2963 if (err)
2964 err = -EBUSY;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002965
2966 mutex_unlock(&bnad->conf_mutex);
2967 return err;
2968}
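/*
 * Worked example of the frame-size math above (illustrative): for the
 * standard 1500-byte MTU, the value handed to bnad_mtu_set() is
 * ETH_HLEN (14) + VLAN_HLEN (4) + 1500 + ETH_FCS_LEN (4) = 1522 bytes
 * on the wire.
 */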
2969
2970static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002971bnad_vlan_rx_add_vid(struct net_device *netdev,
2972 unsigned short vid)
2973{
2974 struct bnad *bnad = netdev_priv(netdev);
2975 unsigned long flags;
2976
2977 if (!bnad->rx_info[0].rx)
2978 return;
2979
2980 mutex_lock(&bnad->conf_mutex);
2981
2982 spin_lock_irqsave(&bnad->bna_lock, flags);
2983 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00002984 set_bit(vid, bnad->active_vlans);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002985 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2986
2987 mutex_unlock(&bnad->conf_mutex);
2988}
2989
2990static void
2991bnad_vlan_rx_kill_vid(struct net_device *netdev,
2992 unsigned short vid)
2993{
2994 struct bnad *bnad = netdev_priv(netdev);
2995 unsigned long flags;
2996
2997 if (!bnad->rx_info[0].rx)
2998 return;
2999
3000 mutex_lock(&bnad->conf_mutex);
3001
3002 spin_lock_irqsave(&bnad->bna_lock, flags);
Jiri Pirkof859d7c2011-07-20 04:54:14 +00003003 clear_bit(vid, bnad->active_vlans);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003004 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3005 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3006
3007 mutex_unlock(&bnad->conf_mutex);
3008}
3009
3010#ifdef CONFIG_NET_POLL_CONTROLLER
3011static void
3012bnad_netpoll(struct net_device *netdev)
3013{
3014 struct bnad *bnad = netdev_priv(netdev);
3015 struct bnad_rx_info *rx_info;
3016 struct bnad_rx_ctrl *rx_ctrl;
3017 u32 curr_mask;
3018 int i, j;
3019
3020 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3021 bna_intx_disable(&bnad->bna, curr_mask);
3022 bnad_isr(bnad->pcidev->irq, netdev);
3023 bna_intx_enable(&bnad->bna, curr_mask);
3024 } else {
Rasesh Mody19dbff92011-08-30 15:27:41 +00003025 /*
3026 * Tx processing may happen in sending context, so no need
3027 * to explicitly process completions here
3028 */
3029
3030 /* Rx processing */
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003031 for (i = 0; i < bnad->num_rx; i++) {
3032 rx_info = &bnad->rx_info[i];
3033 if (!rx_info->rx)
3034 continue;
3035 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3036 rx_ctrl = &rx_info->rx_ctrl[j];
Rasesh Mody271e8b72011-08-30 15:27:40 +00003037 if (rx_ctrl->ccb)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003038 bnad_netif_rx_schedule_poll(bnad,
3039 rx_ctrl->ccb);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003040 }
3041 }
3042 }
3043}
3044#endif
3045
3046static const struct net_device_ops bnad_netdev_ops = {
3047 .ndo_open = bnad_open,
3048 .ndo_stop = bnad_stop,
3049 .ndo_start_xmit = bnad_start_xmit,
Eric Dumazet250e0612010-09-02 12:45:02 -07003050 .ndo_get_stats64 = bnad_get_stats64,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003051 .ndo_set_rx_mode = bnad_set_rx_mode,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003052 .ndo_validate_addr = eth_validate_addr,
3053 .ndo_set_mac_address = bnad_set_mac_address,
3054 .ndo_change_mtu = bnad_change_mtu,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003055 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3056 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3057#ifdef CONFIG_NET_POLL_CONTROLLER
3058 .ndo_poll_controller = bnad_netpoll
3059#endif
3060};
3061
3062static void
3063bnad_netdev_init(struct bnad *bnad, bool using_dac)
3064{
3065 struct net_device *netdev = bnad->netdev;
3066
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003067 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3068 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3069 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003070
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003071 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3072 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3073 NETIF_F_TSO | NETIF_F_TSO6;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003074
Michał Mirosławe5ee20e2011-04-12 09:38:23 +00003075 netdev->features |= netdev->hw_features |
3076 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003077
3078 if (using_dac)
3079 netdev->features |= NETIF_F_HIGHDMA;
3080
Rasesh Mody8b230ed2010-08-23 20:24:12 -07003081 netdev->mem_start = bnad->mmio_start;
3082 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3083
3084 netdev->netdev_ops = &bnad_netdev_ops;
3085 bnad_set_ethtool_ops(netdev);
3086}
3087
3088/*
3089 * 1. Initialize the bnad structure
3090 * 2. Set up netdev pointer in pci_dev
3091 * 3. Initialize Tx free tasklet
3092 * 4. Initialize no. of TxQ & CQs & MSIX vectors
3093 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
	       (unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		 BNAD_MAILBOX_MSIX_VECTORS;
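	/*
	 * Worked example (illustrative only; the actual values are set
	 * by bnad_q_num_init() above, and this assumes
	 * BNAD_MAILBOX_MSIX_VECTORS is 1): with one Tx object of one
	 * TxQ and one Rx object of one Rx path,
	 * msix_num = (1 * 1) + (1 * 1) + 1 = 3 vectors.
	 */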

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}

/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}
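
/*
 * Lock-ordering sketch (added for clarity, not from the original
 * sources). Configuration paths in this file follow the pattern:
 *
 *	mutex_lock(&bnad->conf_mutex);
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	... touch bna state ...
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	mutex_unlock(&bnad->conf_mutex);
 *
 * conf_mutex serializes a whole configuration operation; bna_lock
 * protects each individual access to the bna state machine.
 */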

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}

/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}
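	/*
	 * Note (added for clarity): the above is the standard PCI
	 * DMA-mask fallback: prefer 64-bit streaming and coherent masks
	 * ("DAC"), and if the platform rejects them fall back to 32-bit
	 * addressing before giving up.
	 */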
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool	using_dac;
	int	err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev_priv(netdev)
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "netdev allocation failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	bnad_lock_init(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
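
	/*
	 * Note (added for clarity, not from the original sources): only
	 * iocpf_timer is armed here, to drive the IOC PF state machine
	 * through firmware initialization; the other IOC timers set up
	 * above are started later by the IOC code itself as needed.
	 */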

	/*
	 * Start the chip
	 * If the callback comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;
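
	/*
	 * Note (added for clarity, not from the original sources): the
	 * block above negotiates queue counts with the device. It first
	 * requests the configured TxQ/Rx-path counts; if the device
	 * rejects them, it shrinks the request to what bna_attr()
	 * reports and retries once, failing with -EIO only if even the
	 * adjusted counts are refused.
	 */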

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
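
/*
 * Note (added for clarity): the error labels above unwind in reverse
 * order of the setup steps in bnad_pci_probe(), so a failure at any
 * stage releases exactly what had been acquired up to that point.
 */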

static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}

static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
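
/*
 * Note (added for clarity; an assumption, not from the original
 * sources): matching on PCI class PCI_CLASS_NETWORK_ETHERNET in
 * addition to the vendor/device IDs restricts this driver to the
 * Ethernet function(s) of the converged (network + FC) adapter.
 */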

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
			"(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);