/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)

#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

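/*
 * Worked example of the mapping above (illustrative sketch, not part of the
 * original driver).  With SGE_NUM_GENBITS == 2 the table maps 1-15 flits to
 * one descriptor, which implies WR_FLITS == 15, so
 *
 *	flits = 15  ->  1 + (15 - 2) / 14 = 1 descriptor
 *	flits = 16  ->  1 + (16 - 2) / 14 = 2 descriptors
 *	flits = 30  ->  1 + (30 - 2) / 14 = 3 descriptors
 *
 * A direct computation equivalent to the lookup for flits >= 2 would be
 *
 *	return 1 + (flits - 2) / (WR_FLITS - 1);
 *
 * The table presumably trades a few bytes of memory for avoiding the integer
 * divide on the transmit fast path and for handling flits < 2 implicitly.
 */
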
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  Because the
 *	result is a compile-time constant, the compiler optimizes away the
 *	unmapping code on platforms where it returns false.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}

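/*
 * Typical use (illustrative sketch): a caller can guard the unmap path
 * without run-time cost, e.g.
 *
 *	if (need_skb_unmap())
 *		unmap_skb(skb, q, cidx, pdev);
 *
 * On platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing the dummy
 * struct has size 0, the condition folds to false, and the unmapping code is
 * discarded entirely at compile time.
 */
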
/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

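/*
 * Worked example of the Tx bookkeeping above (illustrative only): the
 * counters are free-running, so only differences between them are
 * meaningful.  With a 1024-descriptor queue, q->processed = 1000,
 * q->cleaned = 900 and q->in_use = 600 there are r = 100 descriptors ready
 * to be reclaimed, leaving 600 - 100 = 500 genuinely outstanding;
 * 500 < 512 (half the ring), so should_restart_tx() reports that the queue
 * may be restarted.  Unsigned subtraction keeps the differences correct
 * when the counters wrap.
 */
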
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@rxq: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		if (q->use_pages) {
			put_page(d->pg_chunk.page);
			d->pg_chunk.page = NULL;
		} else {
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va: buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.offset = 0;
	}
	sd->pg_chunk = q->pg_chunk;

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}
	return 0;
}

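/*
 * Example of the chunking above (illustrative, assuming 4KB pages): FL0 uses
 * FL0_PG_CHUNK_SIZE == 2048 with order 0, so each page yields two buffers;
 * the first chunk takes an extra get_page() reference and the second one
 * exhausts the page (offset reaches PAGE_SIZE) and drops it from q->pg_chunk.
 * FL1 uses 8KB chunks carved from order-1 allocations, i.e. one buffer per
 * allocation.  Each chunk's reference is later dropped with put_page() when
 * the buffer is freed, so the allocation goes away once all chunks are done.
 */
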
/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adapter: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	void *buf_start;
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			buf_start = sd->pg_chunk.va;
		} else {
			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
		}

		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
				     adap->pdev);
		if (unlikely(err)) {
			if (!q->use_pages) {
				kfree_skb(sd->skb);
				sd->skb = NULL;
			}
			break;
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
		count++;
	}
	wmb();
	if (likely(count))
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adapter: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

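/*
 * Note on the generation bits used above (illustrative explanation): the
 * free-list descriptors carry generation fields (V_FLD_GEN1/V_FLD_GEN2) that
 * software flips each time pidx wraps from q->size - 1 back to 0.  HW uses
 * them to distinguish freshly written descriptors from stale ones left over
 * from the previous pass around the ring, which is why the generation fields
 * are written only after the buffer address and a wmb() both here and in
 * add_one_rx_buf().
 */
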
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}

/**
 *	t3_reset_qset - reset an SGE qset
 *	@q: the queue set
 *
 *	Reset the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
	kfree(q->lro_frag_tbl);
	q->lro_nfrags = q->lro_frag_len = 0;
}


/**
 *	free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	if (q->tx_reclaim_timer.function)
		del_timer_sync(&q->tx_reclaim_timer);

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}

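/*
 * Example (illustrative): a struct sg_ent packs two 4-byte lengths and two
 * 8-byte addresses, i.e. 24 bytes or 3 flits, so the formula above gives
 *
 *	sgl_len(1) = 2,  sgl_len(2) = 3,  sgl_len(3) = 5,  sgl_len(4) = 6
 *
 * in other words 3 flits per pair of buffers plus 2 flits for a trailing
 * odd entry.
 */
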
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					pci_unmap_addr(sd, dma_addr), len,
					PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					pci_unmap_addr(sd, dma_addr), len,
					PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres))
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 *	Note: this function is similar to @get_packet but deals with Rx buffers
 *	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	newskb = skb = q->pg_skb;

	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					pci_unmap_addr(sd, dma_addr), len,
					PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev,
					pci_unmap_addr(sd, dma_addr), len,
					PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
	}
	newskb->truesize += newskb->data_len;

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

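/*
 * Example (illustrative): a TSO packet with 3 page fragments needs an SGL of
 * nr_frags + 1 = 4 entries, i.e. sgl_len(4) = 6 flits, plus 2 flits of
 * WR/CPL header and 1 more flit for the LSO information: 9 flits in total,
 * which flits_to_desc() maps to a single Tx descriptor.  Small packets that
 * fit as immediate data always take exactly one descriptor.
 */
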
/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@pdev: the PCI device
 *
 *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

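/*
 * Example of the return value above (illustrative): for a packet with a
 * linear head and 3 fragments, make_sgl() writes 4 address/length pairs;
 * j toggles 1 -> 0 -> 1 -> 0, two sg_ent structures are filled completely,
 * and the function returns (3 + 1) * 3 / 2 + 0 = 6 flits, matching
 * sgl_len(4).  When the entry count is odd the unused second slot of the
 * last sg_ent gets a zero length so HW ignores it.
 */
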
/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
 *	where the HW goes to sleep just after we check; in that case the
 *	interrupt handler will detect the outstanding Tx packet and ring the
 *	doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

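/*
 * Sketch of the multi-descriptor case above (illustrative, assuming
 * WR_FLITS == 15): with flits = 2 header flits and sgl_flits = 20, the first
 * descriptor takes 13 SGL flits after the header, the second starts with a
 * fresh 1-flit header followed by the remaining 7, and the SOP/EOP bits mark
 * the first and last descriptors of the WR.  The first descriptor's wr_lo
 * (with its generation) is written last, after a wmb(), so HW does not start
 * fetching the WR until every descriptor it spans is in memory.
 */
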
/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *
 *	Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
				 struct sge_txq *q)
{
	netif_stop_queue(dev);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}

/**
 *	t3_eth_xmit - add a packet to the Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_qset *qs = pi->qs;
	struct sge_txq *q = &qs->txq[TXQ_ETH];

	/*
	 * The chip min packet length is 9 octets but play it safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_queue(dev, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_queue(dev, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_wake_queue(dev);
		}
	}

	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets, causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a cleanup timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 *	write_imm - write a packet into a Tx descriptor as immediate data
 *	@d: the Tx descriptor to write
 *	@skb: the packet
 *	@len: the length of packet data to write as immediate data
 *	@gen: the generation bit value to write
 *
 *	Writes a packet as immediate data into a Tx descriptor.  The packet
 *	contains a work request at its beginning.  We must write the packet
 *	carefully so the SGE doesn't read it accidentally before it's written
 *	in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

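/*
 * Canonical caller pattern (illustrative sketch, mirroring ctrl_xmit() and
 * ofld_xmit() below): callers loop on the return value so a queue that only
 * briefly ran out of descriptors is retried rather than left stalled:
 *
 *	again:	reclaim_completed_tx(adap, q);
 *		ret = check_desc_avail(adap, q, skb, ndesc, qid);
 *		if (ret == 2)
 *			goto again;		- descriptors freed up, retry
 *		if (ret == 1)
 *			return NET_XMIT_CN;	- packet queued for restart
 */
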
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

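/*
 * Note (illustrative): WR_LEN is WR_FLITS * 8 bytes, i.e. the amount of
 * immediate data that fits in a single Tx descriptor; with WR_FLITS == 15
 * (as assumed above) that is 120 bytes.  Control-queue messages must always
 * satisfy this test, while offload packets that fail it are sent through an
 * SGL instead (see write_ofld_wr() below).
 */
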
/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data in a single Tx
 *	descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@qs: the queue set containing the control queue
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb->tail - skb->transport_header)
		pci_unmap_single(dui->pdev, *p++,
				 skb->tail - skb->transport_header,
				 PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

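/*
 * Example of the deferred-unmap bookkeeping above (illustrative): for an
 * offload packet with headers plus 2 page fragments,
 * setup_deferred_unmapping() stores 3 DMA addresses at skb->head (the
 * deferred_unmap_info area), and deferred_unmap_destructor() later unmaps
 * one region of skb->tail - skb->transport_header bytes plus the 2
 * fragments.  The addresses are copied out of the SGL because the Tx
 * descriptors holding them may already have been reused by the time the
 * skb is finally freed.
 */
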
1485/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001486 * write_ofld_wr - write an offload work request
1487 * @adap: the adapter
1488 * @skb: the packet to send
1489 * @q: the Tx queue
1490 * @pidx: index of the first Tx descriptor to write
1491 * @gen: the generation value to use
1492 * @ndesc: number of descriptors the packet will occupy
1493 *
1494 * Write an offload work request to send the supplied packet. The packet
1495 * data already carry the work request with most fields populated.
1496 */
1497static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1498 struct sge_txq *q, unsigned int pidx,
1499 unsigned int gen, unsigned int ndesc)
1500{
1501 unsigned int sgl_flits, flits;
1502 struct work_request_hdr *from;
1503 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1504 struct tx_desc *d = &q->desc[pidx];
1505
1506 if (immediate(skb)) {
1507 q->sdesc[pidx].skb = NULL;
1508 write_imm(d, skb, skb->len, gen);
1509 return;
1510 }
1511
1512 /* Only TX_DATA builds SGLs */
1513
1514 from = (struct work_request_hdr *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001515 memcpy(&d->flit[1], &from[1],
1516 skb_transport_offset(skb) - sizeof(*from));
Divy Le Ray4d22de32007-01-18 22:04:14 -05001517
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001518 flits = skb_transport_offset(skb) / 8;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001519 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001520 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001521 skb->tail - skb->transport_header,
Divy Le Ray4d22de32007-01-18 22:04:14 -05001522 adap->pdev);
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001523 if (need_skb_unmap()) {
1524 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1525 skb->destructor = deferred_unmap_destructor;
Divy Le Ray99d7cf32007-02-24 16:44:06 -08001526 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001527
1528 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1529 gen, from->wr_hi, from->wr_lo);
1530}
1531
1532/**
1533 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1534 * @skb: the packet
1535 *
1536 * Returns the number of Tx descriptors needed for the given offload
1537 * packet. These packets are already fully constructed.
1538 */
1539static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1540{
Divy Le Ray27186dc2007-08-21 20:49:15 -07001541 unsigned int flits, cnt;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001542
Divy Le Ray27186dc2007-08-21 20:49:15 -07001543 if (skb->len <= WR_LEN)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001544 return 1; /* packet fits as immediate data */
1545
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07001546 flits = skb_transport_offset(skb) / 8; /* headers */
Divy Le Ray27186dc2007-08-21 20:49:15 -07001547 cnt = skb_shinfo(skb)->nr_frags;
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001548 if (skb->tail != skb->transport_header)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001549 cnt++;
1550 return flits_to_desc(flits + sgl_len(cnt));
1551}
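/*
 * Worked example (illustration only, the figures are hypothetical): an
 * offload packet that does not fit as immediate data, carries 40 bytes of
 * headers up to the transport header, has two page fragments and no linear
 * payload beyond the transport header works out as
 *
 *	flits = 40 / 8 = 5, cnt = 2
 *	ndesc = flits_to_desc(5 + sgl_len(2))
 *
 * i.e. the header flits plus the SGL entries determine the descriptor count.
 */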
1552
1553/**
1554 * ofld_xmit - send a packet through an offload queue
1555 * @adap: the adapter
1556 * @q: the Tx offload queue
1557 * @skb: the packet
1558 *
1559 * Send an offload packet through an SGE offload queue.
1560 */
1561static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1562 struct sk_buff *skb)
1563{
1564 int ret;
1565 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1566
1567 spin_lock(&q->lock);
1568 again:reclaim_completed_tx(adap, q);
1569
1570 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1571 if (unlikely(ret)) {
1572 if (ret == 1) {
1573 skb->priority = ndesc; /* save for restart */
1574 spin_unlock(&q->lock);
1575 return NET_XMIT_CN;
1576 }
1577 goto again;
1578 }
1579
1580 gen = q->gen;
1581 q->in_use += ndesc;
1582 pidx = q->pidx;
1583 q->pidx += ndesc;
1584 if (q->pidx >= q->size) {
1585 q->pidx -= q->size;
1586 q->gen ^= 1;
1587 }
1588 spin_unlock(&q->lock);
1589
1590 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1591 check_ring_tx_db(adap, q);
1592 return NET_XMIT_SUCCESS;
1593}
1594
1595/**
1596 * restart_offloadq - restart a suspended offload queue
1597 * @qs: the queue set containing the offload queue
1598 *
1599 * Resumes transmission on a suspended Tx offload queue.
1600 */
1601static void restart_offloadq(unsigned long data)
1602{
1603 struct sk_buff *skb;
1604 struct sge_qset *qs = (struct sge_qset *)data;
1605 struct sge_txq *q = &qs->txq[TXQ_OFLD];
Divy Le Ray5fbf8162007-08-29 19:15:47 -07001606 const struct port_info *pi = netdev_priv(qs->netdev);
1607 struct adapter *adap = pi->adapter;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001608
1609 spin_lock(&q->lock);
1610 again:reclaim_completed_tx(adap, q);
1611
1612 while ((skb = skb_peek(&q->sendq)) != NULL) {
1613 unsigned int gen, pidx;
1614 unsigned int ndesc = skb->priority;
1615
1616 if (unlikely(q->size - q->in_use < ndesc)) {
1617 set_bit(TXQ_OFLD, &qs->txq_stopped);
1618 smp_mb__after_clear_bit();
1619
1620 if (should_restart_tx(q) &&
1621 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1622 goto again;
1623 q->stops++;
1624 break;
1625 }
1626
1627 gen = q->gen;
1628 q->in_use += ndesc;
1629 pidx = q->pidx;
1630 q->pidx += ndesc;
1631 if (q->pidx >= q->size) {
1632 q->pidx -= q->size;
1633 q->gen ^= 1;
1634 }
1635 __skb_unlink(skb, &q->sendq);
1636 spin_unlock(&q->lock);
1637
1638 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1639 spin_lock(&q->lock);
1640 }
1641 spin_unlock(&q->lock);
1642
1643#if USE_GTS
1644 set_bit(TXQ_RUNNING, &q->flags);
1645 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1646#endif
Divy Le Rayafefce62007-11-16 11:22:21 -08001647 wmb();
Divy Le Ray4d22de32007-01-18 22:04:14 -05001648 t3_write_reg(adap, A_SG_KDOORBELL,
1649 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1650}
1651
1652/**
1653 * queue_set - return the queue set a packet should use
1654 * @skb: the packet
1655 *
1656 * Maps a packet to the SGE queue set it should use. The desired queue
1657 * set is carried in bits 1-3 in the packet's priority.
1658 */
1659static inline int queue_set(const struct sk_buff *skb)
1660{
1661 return skb->priority >> 1;
1662}
1663
1664/**
1665 * is_ctrl_pkt - return whether an offload packet is a control packet
1666 * @skb: the packet
1667 *
1668 * Determines whether an offload packet should use an OFLD or a CTRL
1669 * Tx queue. This is indicated by bit 0 in the packet's priority.
1670 */
1671static inline int is_ctrl_pkt(const struct sk_buff *skb)
1672{
1673 return skb->priority & 1;
1674}
1675
1676/**
1677 * t3_offload_tx - send an offload packet
1678 * @tdev: the offload device to send to
1679 * @skb: the packet
1680 *
1681 * Sends an offload packet. We use the packet priority to select the
1682 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1683 * should be sent as regular or control, bits 1-3 select the queue set.
1684 */
1685int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1686{
1687 struct adapter *adap = tdev2adap(tdev);
1688 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1689
1690 if (unlikely(is_ctrl_pkt(skb)))
1691 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1692
1693 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1694}
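/*
 * Sketch of the priority encoding consumed above (illustration only, not a
 * helper that exists in this file): an offload client targeting queue set 2
 * with a regular, non-control work request would set
 *
 *	skb->priority = (2 << 1) | 0;	(bits 1-3: queue set, bit 0: ctrl)
 *	t3_offload_tx(tdev, skb);
 *
 * whereas skb->priority = (2 << 1) | 1 would steer the same packet to that
 * queue set's CTRL queue.
 */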
1695
1696/**
1697 * offload_enqueue - add an offload packet to an SGE offload receive queue
1698 * @q: the SGE response queue
1699 * @skb: the packet
1700 *
1701 * Add a new offload packet to an SGE response queue's offload packet
1702 * queue. If the packet is the first on the queue it schedules the RX
1703 * softirq to process the queue.
1704 */
1705static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1706{
David S. Miller147e70e2008-09-22 01:29:52 -07001707 int was_empty = skb_queue_empty(&q->rx_queue);
1708
1709 __skb_queue_tail(&q->rx_queue, skb);
1710
1711 if (was_empty) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05001712 struct sge_qset *qs = rspq_to_qset(q);
1713
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001714 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001715 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001716}
1717
1718/**
1719 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1720 * @tdev: the offload device that will be receiving the packets
1721 * @q: the SGE response queue that assembled the bundle
1722 * @skbs: the partial bundle
1723 * @n: the number of packets in the bundle
1724 *
1725 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1726 */
1727static inline void deliver_partial_bundle(struct t3cdev *tdev,
1728 struct sge_rspq *q,
1729 struct sk_buff *skbs[], int n)
1730{
1731 if (n) {
1732 q->offload_bundles++;
1733 tdev->recv(tdev, skbs, n);
1734 }
1735}
1736
1737/**
1738 * ofld_poll - NAPI handler for offload packets in interrupt mode
1739 * @napi: the napi instance doing the polling
1740 * @budget: polling budget
1741 *
1742 * The NAPI handler for offload packets when a response queue is serviced
1743 * by the hard interrupt handler, i.e., when it's operating in non-polling
1744 * mode. Creates small packet batches and sends them through the offload
1745 * receive handler. Batches need to be of modest size as we do prefetches
1746 * on the packets in each.
1747 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001748static int ofld_poll(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001749{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001750 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001751 struct sge_rspq *q = &qs->rspq;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001752 struct adapter *adapter = qs->adap;
1753 int work_done = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001754
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001755 while (work_done < budget) {
David S. Miller147e70e2008-09-22 01:29:52 -07001756 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1757 struct sk_buff_head queue;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001758 int ngathered;
1759
1760 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001761 __skb_queue_head_init(&queue);
1762 skb_queue_splice_init(&q->rx_queue, &queue);
1763 if (skb_queue_empty(&queue)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001764 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001765 spin_unlock_irq(&q->lock);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001766 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001767 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05001768 spin_unlock_irq(&q->lock);
1769
David S. Miller147e70e2008-09-22 01:29:52 -07001770 ngathered = 0;
1771 skb_queue_walk_safe(&queue, skb, tmp) {
1772 if (work_done >= budget)
1773 break;
1774 work_done++;
1775
1776 __skb_unlink(skb, &queue);
1777 prefetch(skb->data);
1778 skbs[ngathered] = skb;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001779 if (++ngathered == RX_BUNDLE_SIZE) {
1780 q->offload_bundles++;
1781 adapter->tdev.recv(&adapter->tdev, skbs,
1782 ngathered);
1783 ngathered = 0;
1784 }
1785 }
David S. Miller147e70e2008-09-22 01:29:52 -07001786 if (!skb_queue_empty(&queue)) {
1787 /* splice remaining packets back onto Rx queue */
Divy Le Ray4d22de32007-01-18 22:04:14 -05001788 spin_lock_irq(&q->lock);
David S. Miller147e70e2008-09-22 01:29:52 -07001789 skb_queue_splice(&queue, &q->rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001790 spin_unlock_irq(&q->lock);
1791 }
1792 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1793 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001794
1795 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001796}
1797
1798/**
1799 * rx_offload - process a received offload packet
1800 * @tdev: the offload device receiving the packet
1801 * @rq: the response queue that received the packet
1802 * @skb: the packet
1803 * @rx_gather: a gather list of packets if we are building a bundle
1804 * @gather_idx: index of the next available slot in the bundle
1805 *
1806 * Process an ingress offload packet and add it to the offload ingress
1807 * queue. Returns the index of the next available slot in the bundle.
1808 */
1809static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1810 struct sk_buff *skb, struct sk_buff *rx_gather[],
1811 unsigned int gather_idx)
1812{
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001813 skb_reset_mac_header(skb);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001814 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001815 skb_reset_transport_header(skb);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001816
1817 if (rq->polling) {
1818 rx_gather[gather_idx++] = skb;
1819 if (gather_idx == RX_BUNDLE_SIZE) {
1820 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1821 gather_idx = 0;
1822 rq->offload_bundles++;
1823 }
1824 } else
1825 offload_enqueue(rq, skb);
1826
1827 return gather_idx;
1828}
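/*
 * Note (added commentary): rx_offload() has two delivery paths.  When the
 * response queue runs in polling (NAPI) mode the packet is batched in
 * rx_gather[] and handed to tdev->recv() once RX_BUNDLE_SIZE packets have
 * accumulated; otherwise it is queued with offload_enqueue(), which
 * schedules the queue set's NAPI instance so ofld_poll() drains it later.
 */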
1829
1830/**
Divy Le Ray4d22de32007-01-18 22:04:14 -05001831 * restart_tx - check whether to restart suspended Tx queues
1832 * @qs: the queue set to resume
1833 *
1834 * Restarts suspended Tx queues of an SGE queue set if they have enough
1835 * free resources to resume operation.
1836 */
1837static void restart_tx(struct sge_qset *qs)
1838{
1839 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1840 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1841 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1842 qs->txq[TXQ_ETH].restarts++;
1843 if (netif_running(qs->netdev))
1844 netif_wake_queue(qs->netdev);
1845 }
1846
1847 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1848 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1849 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1850 qs->txq[TXQ_OFLD].restarts++;
1851 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1852 }
1853 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1854 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1855 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1856 qs->txq[TXQ_CTRL].restarts++;
1857 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1858 }
1859}
1860
1861/**
1862 * rx_eth - process an ingress ethernet packet
1863 * @adap: the adapter
1864 * @rq: the response queue that received the packet
1865 * @skb: the packet
1866 * @pad: amount of padding at the start of the buffer
1867 *
1868 * Process an ingress ethernet packet and deliver it to the stack.
1869 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1870 * if it was immediate data in a response.
1871 */
1872static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
Divy Le Rayb47385b2008-05-21 18:56:26 -07001873 struct sk_buff *skb, int pad, int lro)
Divy Le Ray4d22de32007-01-18 22:04:14 -05001874{
1875 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001876 struct sge_qset *qs = rspq_to_qset(rq);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001877 struct port_info *pi;
1878
Divy Le Ray4d22de32007-01-18 22:04:14 -05001879 skb_pull(skb, sizeof(*p) + pad);
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001880 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
Divy Le Raye360b562007-05-30 10:01:29 -07001881 skb->dev->last_rx = jiffies;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001882 pi = netdev_priv(skb->dev);
Al Viro05e5c112007-12-22 18:56:23 +00001883 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
Divy Le Ray4d22de32007-01-18 22:04:14 -05001884 !p->fragment) {
1885 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1886 skb->ip_summed = CHECKSUM_UNNECESSARY;
1887 } else
1888 skb->ip_summed = CHECKSUM_NONE;
1889
1890 if (unlikely(p->vlan_valid)) {
1891 struct vlan_group *grp = pi->vlan_grp;
1892
Divy Le Rayb47385b2008-05-21 18:56:26 -07001893 qs->port_stats[SGE_PSTAT_VLANEX]++;
Divy Le Ray4d22de32007-01-18 22:04:14 -05001894 if (likely(grp))
Divy Le Rayb47385b2008-05-21 18:56:26 -07001895 if (lro)
1896 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
1897 grp,
1898 ntohs(p->vlan),
1899 p);
1900 else
1901 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1902 rq->polling);
Divy Le Ray4d22de32007-01-18 22:04:14 -05001903 else
1904 dev_kfree_skb_any(skb);
Divy Le Rayb47385b2008-05-21 18:56:26 -07001905 } else if (rq->polling) {
1906 if (lro)
1907 lro_receive_skb(&qs->lro_mgr, skb, p);
1908 else
1909 netif_receive_skb(skb);
1910 } else
Divy Le Ray4d22de32007-01-18 22:04:14 -05001911 netif_rx(skb);
1912}
1913
Divy Le Rayb47385b2008-05-21 18:56:26 -07001914static inline int is_eth_tcp(u32 rss)
1915{
1916 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1917}
1918
1919/**
1920 * lro_frame_ok - check if an ingress packet is eligible for LRO
1921 * @p: the CPL header of the packet
1922 *
1923 * Returns true if a received packet is eligible for LRO.
1924 * The following conditions must be true:
1925 * - packet is TCP/IP Ethernet II (checked elsewhere)
1926 * - not an IP fragment
1927 * - no IP options
1928 * - TCP/IP checksums are correct
1929 * - the packet is for this host
1930 */
1931static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1932{
1933 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1934 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1935
1936 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1938}
1939
Divy Le Rayb47385b2008-05-21 18:56:26 -07001940static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1941 u64 *hdr_flags, void *priv)
1942{
1943 const struct cpl_rx_pkt *cpl = priv;
1944
1945 if (!lro_frame_ok(cpl))
1946 return -1;
1947
1948 *eh = (struct ethhdr *)(cpl + 1);
1949 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1950 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1951
Divy Le Rayb47385b2008-05-21 18:56:26 -07001952 *hdr_flags = LRO_IPV4 | LRO_TCP;
1953 return 0;
1954}
1955
1956static int t3_get_skb_header(struct sk_buff *skb,
1957 void **iph, void **tcph, u64 *hdr_flags,
1958 void *priv)
1959{
1960 void *eh;
1961
1962 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
1963}
1964
1965static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
1966 void **iph, void **tcph, u64 *hdr_flags,
1967 void *priv)
1968{
1969 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
1970}
1971
1972/**
1973 * lro_add_page - add a page chunk to an LRO session
1974 * @adap: the adapter
1975 * @qs: the associated queue set
1976 * @fl: the free list containing the page chunk to add
1977 * @len: packet length
1978 * @complete: Indicates the last fragment of a frame
1979 *
1980 * Add a received packet contained in a page chunk to an existing LRO
1981 * session.
1982 */
1983static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
1984 struct sge_fl *fl, int len, int complete)
1985{
1986 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1987 struct cpl_rx_pkt *cpl;
1988 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
1989 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
1990 int offset = 0;
1991
1992 if (!nr_frags) {
1993 offset = 2 + sizeof(struct cpl_rx_pkt);
1994 qs->lro_va = cpl = sd->pg_chunk.va + 2;
1995 }
1996
1997 fl->credits--;
1998
1999 len -= offset;
2000 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2001 fl->buf_size, PCI_DMA_FROMDEVICE);
2002
2003 rx_frag += nr_frags;
2004 rx_frag->page = sd->pg_chunk.page;
2005 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2006 rx_frag->size = len;
2007 frag_len += len;
2008 qs->lro_nfrags++;
2009 qs->lro_frag_len = frag_len;
2010
2011 if (!complete)
2012 return;
2013
2014 qs->lro_nfrags = qs->lro_frag_len = 0;
2015 cpl = qs->lro_va;
2016
2017 if (unlikely(cpl->vlan_valid)) {
2018 struct net_device *dev = qs->netdev;
2019 struct port_info *pi = netdev_priv(dev);
2020 struct vlan_group *grp = pi->vlan_grp;
2021
2022 if (likely(grp != NULL)) {
2023 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
2024 qs->lro_frag_tbl,
2025 frag_len, frag_len,
2026 grp, ntohs(cpl->vlan),
2027 cpl, 0);
2028 return;
2029 }
2030 }
2031 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
2032 frag_len, frag_len, cpl, 0);
2033}
2034
2035/**
2036 * init_lro_mgr - initialize a LRO manager object
2037 * @qs: the SGE queue set owning the LRO state
 * @lro_mgr: the LRO manager object
2038 */
2039static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2040{
2041 lro_mgr->dev = qs->netdev;
2042 lro_mgr->features = LRO_F_NAPI;
2043 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2044 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2045 lro_mgr->max_desc = T3_MAX_LRO_SES;
2046 lro_mgr->lro_arr = qs->lro_desc;
2047 lro_mgr->get_frag_header = t3_get_frag_header;
2048 lro_mgr->get_skb_header = t3_get_skb_header;
2049 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2050 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2051 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2052}
2053
Divy Le Ray4d22de32007-01-18 22:04:14 -05002054/**
2055 * handle_rsp_cntrl_info - handles control information in a response
2056 * @qs: the queue set corresponding to the response
2057 * @flags: the response control flags
Divy Le Ray4d22de32007-01-18 22:04:14 -05002058 *
2059 * Handles the control information of an SGE response, such as GTS
2060 * indications and completion credits for the queue set's Tx queues.
Divy Le Ray6195c712007-01-30 19:43:56 -08002061 * HW coalesces credits, we don't do any extra SW coalescing.
Divy Le Ray4d22de32007-01-18 22:04:14 -05002062 */
Divy Le Ray6195c712007-01-30 19:43:56 -08002063static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002064{
2065 unsigned int credits;
2066
2067#if USE_GTS
2068 if (flags & F_RSPD_TXQ0_GTS)
2069 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2070#endif
2071
Divy Le Ray4d22de32007-01-18 22:04:14 -05002072 credits = G_RSPD_TXQ0_CR(flags);
2073 if (credits)
2074 qs->txq[TXQ_ETH].processed += credits;
2075
Divy Le Ray6195c712007-01-30 19:43:56 -08002076 credits = G_RSPD_TXQ2_CR(flags);
2077 if (credits)
2078 qs->txq[TXQ_CTRL].processed += credits;
2079
Divy Le Ray4d22de32007-01-18 22:04:14 -05002080# if USE_GTS
2081 if (flags & F_RSPD_TXQ1_GTS)
2082 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2083# endif
Divy Le Ray6195c712007-01-30 19:43:56 -08002084 credits = G_RSPD_TXQ1_CR(flags);
2085 if (credits)
2086 qs->txq[TXQ_OFLD].processed += credits;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002087}
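/*
 * Note (added commentary): the credit fields map onto the queue set's Tx
 * queues exactly as the code above shows - RSPD_TXQ0 credits belong to
 * TXQ_ETH, RSPD_TXQ1 to TXQ_OFLD and RSPD_TXQ2 to TXQ_CTRL.
 */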
2088
2089/**
2090 * check_ring_db - check if we need to ring any doorbells
2091 * @adapter: the adapter
2092 * @qs: the queue set whose Tx queues are to be examined
2093 * @sleeping: indicates which Tx queue sent GTS
2094 *
2095 * Checks if some of a queue set's Tx queues need to ring their doorbells
2096 * to resume transmission after idling while they still have unprocessed
2097 * descriptors.
2098 */
2099static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2100 unsigned int sleeping)
2101{
2102 if (sleeping & F_RSPD_TXQ0_GTS) {
2103 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2104
2105 if (txq->cleaned + txq->in_use != txq->processed &&
2106 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2107 set_bit(TXQ_RUNNING, &txq->flags);
2108 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2109 V_EGRCNTX(txq->cntxt_id));
2110 }
2111 }
2112
2113 if (sleeping & F_RSPD_TXQ1_GTS) {
2114 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2115
2116 if (txq->cleaned + txq->in_use != txq->processed &&
2117 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2118 set_bit(TXQ_RUNNING, &txq->flags);
2119 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2120 V_EGRCNTX(txq->cntxt_id));
2121 }
2122 }
2123}
2124
2125/**
2126 * is_new_response - check if a response is newly written
2127 * @r: the response descriptor
2128 * @q: the response queue
2129 *
2130 * Returns true if a response descriptor contains a yet unprocessed
2131 * response.
2132 */
2133static inline int is_new_response(const struct rsp_desc *r,
2134 const struct sge_rspq *q)
2135{
2136 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2137}
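/*
 * Note (added commentary): the generation bit is how the driver tells a
 * freshly written response from a stale one it has already consumed.
 * The queue's gen starts at 1 and is flipped every time the consumer index
 * wraps (see process_responses() below), so a descriptor is new exactly
 * when its F_RSPD_GEN2 bit matches the queue's current generation.
 */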
2138
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002139static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2140{
2141 q->pg_skb = NULL;
2142 q->rx_recycle_buf = 0;
2143}
2144
Divy Le Ray4d22de32007-01-18 22:04:14 -05002145#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2146#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2147 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2148 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2149 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2150
2151/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2152#define NOMEM_INTR_DELAY 2500
2153
2154/**
2155 * process_responses - process responses from an SGE response queue
2156 * @adap: the adapter
2157 * @qs: the queue set to which the response queue belongs
2158 * @budget: how many responses can be processed in this round
2159 *
2160 * Process responses from an SGE response queue up to the supplied budget.
2161 * Responses include received packets as well as credits and other events
2162 * for the queues that belong to the response queue's queue set.
2163 * A negative budget is effectively unlimited.
2164 *
2165 * Additionally choose the interrupt holdoff time for the next interrupt
2166 * on this queue. If the system is under memory shortage use a fairly
2167 * long delay to help recovery.
2168 */
2169static int process_responses(struct adapter *adap, struct sge_qset *qs,
2170 int budget)
2171{
2172 struct sge_rspq *q = &qs->rspq;
2173 struct rsp_desc *r = &q->desc[q->cidx];
2174 int budget_left = budget;
Divy Le Ray6195c712007-01-30 19:43:56 -08002175 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002176 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2177 int ngathered = 0;
2178
2179 q->next_holdoff = q->holdoff_tmr;
2180
2181 while (likely(budget_left && is_new_response(r, q))) {
Divy Le Rayb47385b2008-05-21 18:56:26 -07002182 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002183 struct sk_buff *skb = NULL;
2184 u32 len, flags = ntohl(r->flags);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002185 __be32 rss_hi = *(const __be32 *)r,
2186 rss_lo = r->rss_hdr.rss_hash_val;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002187
2188 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2189
2190 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2191 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2192 if (!skb)
2193 goto no_mem;
2194
2195 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2196 skb->data[0] = CPL_ASYNC_NOTIF;
2197 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2198 q->async_notif++;
2199 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2200 skb = get_imm_packet(r);
2201 if (unlikely(!skb)) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002202no_mem:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002203 q->next_holdoff = NOMEM_INTR_DELAY;
2204 q->nomem++;
2205 /* consume one credit since we tried */
2206 budget_left--;
2207 break;
2208 }
2209 q->imm_data++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002210 ethpad = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002211 } else if ((len = ntohl(r->len_cq)) != 0) {
Divy Le Raycf992af2007-05-30 21:10:47 -07002212 struct sge_fl *fl;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002213
Divy Le Rayb47385b2008-05-21 18:56:26 -07002214 if (eth)
2215 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2216
Divy Le Raycf992af2007-05-30 21:10:47 -07002217 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2218 if (fl->use_pages) {
2219 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002220
Divy Le Raycf992af2007-05-30 21:10:47 -07002221 prefetch(addr);
2222#if L1_CACHE_BYTES < 128
2223 prefetch(addr + L1_CACHE_BYTES);
2224#endif
Divy Le Raye0994eb2007-02-24 16:44:17 -08002225 __refill_fl(adap, fl);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002226 if (lro > 0) {
2227 lro_add_page(adap, qs, fl,
2228 G_RSPD_LEN(len),
2229 flags & F_RSPD_EOP);
2230 goto next_fl;
2231 }
Divy Le Raye0994eb2007-02-24 16:44:17 -08002232
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002233 skb = get_packet_pg(adap, fl, q,
2234 G_RSPD_LEN(len),
2235 eth ?
2236 SGE_RX_DROP_THRES : 0);
2237 q->pg_skb = skb;
Divy Le Raycf992af2007-05-30 21:10:47 -07002238 } else
Divy Le Raye0994eb2007-02-24 16:44:17 -08002239 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2240 eth ? SGE_RX_DROP_THRES : 0);
Divy Le Raycf992af2007-05-30 21:10:47 -07002241 if (unlikely(!skb)) {
2242 if (!eth)
2243 goto no_mem;
2244 q->rx_drops++;
2245 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2246 __skb_pull(skb, 2);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002247next_fl:
Divy Le Ray4d22de32007-01-18 22:04:14 -05002248 if (++fl->cidx == fl->size)
2249 fl->cidx = 0;
2250 } else
2251 q->pure_rsps++;
2252
2253 if (flags & RSPD_CTRL_MASK) {
2254 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002255 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002256 }
2257
2258 r++;
2259 if (unlikely(++q->cidx == q->size)) {
2260 q->cidx = 0;
2261 q->gen ^= 1;
2262 r = q->desc;
2263 }
2264 prefetch(r);
2265
2266 if (++q->credits >= (q->size / 4)) {
2267 refill_rspq(adap, q, q->credits);
2268 q->credits = 0;
2269 }
2270
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002271 packet_complete = flags &
2272 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2273 F_RSPD_ASYNC_NOTIF);
2274
2275 if (skb != NULL && packet_complete) {
Divy Le Ray4d22de32007-01-18 22:04:14 -05002276 if (eth)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002277 rx_eth(adap, q, skb, ethpad, lro);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002278 else {
Divy Le Rayafefce62007-11-16 11:22:21 -08002279 q->offload_pkts++;
Divy Le Raycf992af2007-05-30 21:10:47 -07002280 /* Preserve the RSS info in csum & priority */
2281 skb->csum = rss_hi;
2282 skb->priority = rss_lo;
2283 ngathered = rx_offload(&adap->tdev, q, skb,
2284 offload_skbs,
Divy Le Raye0994eb2007-02-24 16:44:17 -08002285 ngathered);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002286 }
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002287
2288 if (flags & F_RSPD_EOP)
Divy Le Rayb47385b2008-05-21 18:56:26 -07002289 clear_rspq_bufstate(q);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002290 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002291 --budget_left;
2292 }
2293
Divy Le Ray4d22de32007-01-18 22:04:14 -05002294 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002295 lro_flush_all(&qs->lro_mgr);
2296 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2297 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2298 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2299
Divy Le Ray4d22de32007-01-18 22:04:14 -05002300 if (sleeping)
2301 check_ring_db(adap, qs, sleeping);
2302
2303 smp_mb(); /* commit Tx queue .processed updates */
2304 if (unlikely(qs->txq_stopped != 0))
2305 restart_tx(qs);
2306
2307 budget -= budget_left;
2308 return budget;
2309}
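/*
 * Note (added commentary): process_responses() returns the number of
 * responses it actually consumed (the original budget minus what was left),
 * which napi_rx_handler() below reports to the NAPI core as work_done.
 */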
2310
2311static inline int is_pure_response(const struct rsp_desc *r)
2312{
2313 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2314
2315 return (n | r->len_cq) == 0;
2316}
2317
2318/**
2319 * napi_rx_handler - the NAPI handler for Rx processing
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002320 * @napi: the napi instance
Divy Le Ray4d22de32007-01-18 22:04:14 -05002321 * @budget: how many packets we can process in this round
2322 *
2323 * Handler for new data events when using NAPI.
2324 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002325static int napi_rx_handler(struct napi_struct *napi, int budget)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002326{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002327 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2328 struct adapter *adap = qs->adap;
2329 int work_done = process_responses(adap, qs, budget);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002330
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002331 if (likely(work_done < budget)) {
2332 napi_complete(napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002333
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002334 /*
2335 * Because we don't atomically flush the following
2336 * write it is possible that in very rare cases it can
2337 * reach the device in a way that races with a new
2338 * response being written plus an error interrupt
2339 * causing the NAPI interrupt handler below to return
2340 * unhandled status to the OS. To protect against
2341 * this would require flushing the write and doing
2342 * both the write and the flush with interrupts off.
2343 * Way too expensive and unjustifiable given the
2344 * rarity of the race.
2345 *
2346 * The race cannot happen at all with MSI-X.
2347 */
2348 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2349 V_NEWTIMER(qs->rspq.next_holdoff) |
2350 V_NEWINDEX(qs->rspq.cidx));
2351 }
2352 return work_done;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002353}
2354
2355/*
2356 * Returns true if the device is already scheduled for polling.
2357 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002358static inline int napi_is_scheduled(struct napi_struct *napi)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002359{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002360 return test_bit(NAPI_STATE_SCHED, &napi->state);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002361}
2362
2363/**
2364 * process_pure_responses - process pure responses from a response queue
2365 * @adap: the adapter
2366 * @qs: the queue set owning the response queue
2367 * @r: the first pure response to process
2368 *
2369 * A simpler version of process_responses() that handles only pure (i.e.,
2370 * non data-carrying) responses. Such responses are too light-weight to
2371 * justify calling a softirq under NAPI, so we handle them specially in
2372 * the interrupt handler. The function is called with a pointer to a
2373 * response, which the caller must ensure is a valid pure response.
2374 *
2375 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2376 */
2377static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2378 struct rsp_desc *r)
2379{
2380 struct sge_rspq *q = &qs->rspq;
Divy Le Ray6195c712007-01-30 19:43:56 -08002381 unsigned int sleeping = 0;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002382
2383 do {
2384 u32 flags = ntohl(r->flags);
2385
2386 r++;
2387 if (unlikely(++q->cidx == q->size)) {
2388 q->cidx = 0;
2389 q->gen ^= 1;
2390 r = q->desc;
2391 }
2392 prefetch(r);
2393
2394 if (flags & RSPD_CTRL_MASK) {
2395 sleeping |= flags & RSPD_GTS_MASK;
Divy Le Ray6195c712007-01-30 19:43:56 -08002396 handle_rsp_cntrl_info(qs, flags);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002397 }
2398
2399 q->pure_rsps++;
2400 if (++q->credits >= (q->size / 4)) {
2401 refill_rspq(adap, q, q->credits);
2402 q->credits = 0;
2403 }
2404 } while (is_new_response(r, q) && is_pure_response(r));
2405
Divy Le Ray4d22de32007-01-18 22:04:14 -05002406 if (sleeping)
2407 check_ring_db(adap, qs, sleeping);
2408
2409 smp_mb(); /* commit Tx queue .processed updates */
2410 if (unlikely(qs->txq_stopped != 0))
2411 restart_tx(qs);
2412
2413 return is_new_response(r, q);
2414}
2415
2416/**
2417 * handle_responses - decide what to do with new responses in NAPI mode
2418 * @adap: the adapter
2419 * @q: the response queue
2420 *
2421 * This is used by the NAPI interrupt handlers to decide what to do with
2422 * new SGE responses. If there are no new responses it returns -1. If
2423 * there are new responses and they are pure (i.e., non-data carrying)
2424 * it handles them straight in hard interrupt context as they are very
2425 * cheap and don't deliver any packets. Finally, if there are any data
2426 * signaling responses it schedules the NAPI handler. Returns 1 if it
2427 * schedules NAPI, 0 if all new responses were pure.
2428 *
2429 * The caller must ascertain NAPI is not already running.
2430 */
2431static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2432{
2433 struct sge_qset *qs = rspq_to_qset(q);
2434 struct rsp_desc *r = &q->desc[q->cidx];
2435
2436 if (!is_new_response(r, q))
2437 return -1;
2438 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2439 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2440 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2441 return 0;
2442 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002443 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002444 return 1;
2445}
2446
2447/*
2448 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2449 * (i.e., response queue serviced in hard interrupt).
2450 */
2451irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2452{
2453 struct sge_qset *qs = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002454 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002455 struct sge_rspq *q = &qs->rspq;
2456
2457 spin_lock(&q->lock);
2458 if (process_responses(adap, qs, -1) == 0)
2459 q->unhandled_irqs++;
2460 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2461 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2462 spin_unlock(&q->lock);
2463 return IRQ_HANDLED;
2464}
2465
2466/*
2467 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2468 * (i.e., response queue serviced by NAPI polling).
2469 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002470static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002471{
2472 struct sge_qset *qs = cookie;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002473 struct sge_rspq *q = &qs->rspq;
2474
2475 spin_lock(&q->lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002476
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002477 if (handle_responses(qs->adap, q) < 0)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002478 q->unhandled_irqs++;
2479 spin_unlock(&q->lock);
2480 return IRQ_HANDLED;
2481}
2482
2483/*
2484 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2485 * SGE response queues as well as error and other async events as they all use
2486 * the same MSI vector. We use one SGE response queue per port in this mode
2487 * and protect all response queues with queue 0's lock.
2488 */
2489static irqreturn_t t3_intr_msi(int irq, void *cookie)
2490{
2491 int new_packets = 0;
2492 struct adapter *adap = cookie;
2493 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2494
2495 spin_lock(&q->lock);
2496
2497 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2498 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2499 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2500 new_packets = 1;
2501 }
2502
2503 if (adap->params.nports == 2 &&
2504 process_responses(adap, &adap->sge.qs[1], -1)) {
2505 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2506
2507 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2508 V_NEWTIMER(q1->next_holdoff) |
2509 V_NEWINDEX(q1->cidx));
2510 new_packets = 1;
2511 }
2512
2513 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2514 q->unhandled_irqs++;
2515
2516 spin_unlock(&q->lock);
2517 return IRQ_HANDLED;
2518}
2519
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002520static int rspq_check_napi(struct sge_qset *qs)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002521{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002522 struct sge_rspq *q = &qs->rspq;
2523
2524 if (!napi_is_scheduled(&qs->napi) &&
2525 is_new_response(&q->desc[q->cidx], q)) {
2526 napi_schedule(&qs->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002527 return 1;
2528 }
2529 return 0;
2530}
2531
2532/*
2533 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2534 * by NAPI polling). Handles data events from SGE response queues as well as
2535 * error and other async events as they all use the same MSI vector. We use
2536 * one SGE response queue per port in this mode and protect all response
2537 * queues with queue 0's lock.
2538 */
Stephen Hemminger9265fab2007-10-08 16:22:29 -07002539static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002540{
2541 int new_packets;
2542 struct adapter *adap = cookie;
2543 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2544
2545 spin_lock(&q->lock);
2546
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002547 new_packets = rspq_check_napi(&adap->sge.qs[0]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002548 if (adap->params.nports == 2)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002549 new_packets += rspq_check_napi(&adap->sge.qs[1]);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002550 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2551 q->unhandled_irqs++;
2552
2553 spin_unlock(&q->lock);
2554 return IRQ_HANDLED;
2555}
2556
2557/*
2558 * A helper function that processes responses and issues GTS.
2559 */
2560static inline int process_responses_gts(struct adapter *adap,
2561 struct sge_rspq *rq)
2562{
2563 int work;
2564
2565 work = process_responses(adap, rspq_to_qset(rq), -1);
2566 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2567 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2568 return work;
2569}
2570
2571/*
2572 * The legacy INTx interrupt handler. This needs to handle data events from
2573 * SGE response queues as well as error and other async events as they all use
2574 * the same interrupt pin. We use one SGE response queue per port in this mode
2575 * and protect all response queues with queue 0's lock.
2576 */
2577static irqreturn_t t3_intr(int irq, void *cookie)
2578{
2579 int work_done, w0, w1;
2580 struct adapter *adap = cookie;
2581 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2582 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2583
2584 spin_lock(&q0->lock);
2585
2586 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2587 w1 = adap->params.nports == 2 &&
2588 is_new_response(&q1->desc[q1->cidx], q1);
2589
2590 if (likely(w0 | w1)) {
2591 t3_write_reg(adap, A_PL_CLI, 0);
2592 t3_read_reg(adap, A_PL_CLI); /* flush */
2593
2594 if (likely(w0))
2595 process_responses_gts(adap, q0);
2596
2597 if (w1)
2598 process_responses_gts(adap, q1);
2599
2600 work_done = w0 | w1;
2601 } else
2602 work_done = t3_slow_intr_handler(adap);
2603
2604 spin_unlock(&q0->lock);
2605 return IRQ_RETVAL(work_done != 0);
2606}
2607
2608/*
2609 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2610 * Handles data events from SGE response queues as well as error and other
2611 * async events as they all use the same interrupt pin. We use one SGE
2612 * response queue per port in this mode and protect all response queues with
2613 * queue 0's lock.
2614 */
2615static irqreturn_t t3b_intr(int irq, void *cookie)
2616{
2617 u32 map;
2618 struct adapter *adap = cookie;
2619 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2620
2621 t3_write_reg(adap, A_PL_CLI, 0);
2622 map = t3_read_reg(adap, A_SG_DATA_INTR);
2623
2624 if (unlikely(!map)) /* shared interrupt, most likely */
2625 return IRQ_NONE;
2626
2627 spin_lock(&q0->lock);
2628
2629 if (unlikely(map & F_ERRINTR))
2630 t3_slow_intr_handler(adap);
2631
2632 if (likely(map & 1))
2633 process_responses_gts(adap, q0);
2634
2635 if (map & 2)
2636 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2637
2638 spin_unlock(&q0->lock);
2639 return IRQ_HANDLED;
2640}
2641
2642/*
2643 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2644 * Handles data events from SGE response queues as well as error and other
2645 * async events as they all use the same interrupt pin. We use one SGE
2646 * response queue per port in this mode and protect all response queues with
2647 * queue 0's lock.
2648 */
2649static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2650{
2651 u32 map;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002652 struct adapter *adap = cookie;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002653 struct sge_qset *qs0 = &adap->sge.qs[0];
2654 struct sge_rspq *q0 = &qs0->rspq;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002655
2656 t3_write_reg(adap, A_PL_CLI, 0);
2657 map = t3_read_reg(adap, A_SG_DATA_INTR);
2658
2659 if (unlikely(!map)) /* shared interrupt, most likely */
2660 return IRQ_NONE;
2661
2662 spin_lock(&q0->lock);
2663
2664 if (unlikely(map & F_ERRINTR))
2665 t3_slow_intr_handler(adap);
2666
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002667 if (likely(map & 1))
2668 napi_schedule(&qs0->napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002669
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002670 if (map & 2)
2671 napi_schedule(&adap->sge.qs[1].napi);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002672
2673 spin_unlock(&q0->lock);
2674 return IRQ_HANDLED;
2675}
2676
2677/**
2678 * t3_intr_handler - select the top-level interrupt handler
2679 * @adap: the adapter
2680 * @polling: whether using NAPI to service response queues
2681 *
2682 * Selects the top-level interrupt handler based on the type of interrupts
2683 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2684 * response queues.
2685 */
Jeff Garzik7c239972007-10-19 03:12:20 -04002686irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002687{
2688 if (adap->flags & USING_MSIX)
2689 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2690 if (adap->flags & USING_MSI)
2691 return polling ? t3_intr_msi_napi : t3_intr_msi;
2692 if (adap->params.rev > 0)
2693 return polling ? t3b_intr_napi : t3b_intr;
2694 return t3_intr;
2695}
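/*
 * Hypothetical usage sketch (the real registration lives elsewhere in the
 * driver; irq_name is a placeholder): the top-level code picks the handler
 * once and passes it to request_irq(), e.g. for a single-vector NAPI setup
 *
 *	err = request_irq(adap->pdev->irq, t3_intr_handler(adap, 1),
 *			  IRQF_SHARED, irq_name, adap);
 *
 * where the second argument of t3_intr_handler() selects NAPI polling.
 */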
2696
Divy Le Rayb8819552007-12-17 18:47:31 -08002697#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2698 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2699 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2700 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2701 F_HIRCQPARITYERROR)
2702#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2703#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2704 F_RSPQDISABLED)
2705
Divy Le Ray4d22de32007-01-18 22:04:14 -05002706/**
2707 * t3_sge_err_intr_handler - SGE async event interrupt handler
2708 * @adapter: the adapter
2709 *
2710 * Interrupt handler for SGE asynchronous (non-data) events.
2711 */
2712void t3_sge_err_intr_handler(struct adapter *adapter)
2713{
2714 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2715
Divy Le Rayb8819552007-12-17 18:47:31 -08002716 if (status & SGE_PARERR)
2717 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2718 status & SGE_PARERR);
2719 if (status & SGE_FRAMINGERR)
2720 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2721 status & SGE_FRAMINGERR);
2722
Divy Le Ray4d22de32007-01-18 22:04:14 -05002723 if (status & F_RSPQCREDITOVERFOW)
2724 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2725
2726 if (status & F_RSPQDISABLED) {
2727 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2728
2729 CH_ALERT(adapter,
2730 "packet delivered to disabled response queue "
2731 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2732 }
2733
Divy Le Ray6e3f03b2007-08-21 20:49:10 -07002734 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2735 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2736 status & F_HIPIODRBDROPERR ? "high" : "lo");
2737
Divy Le Ray4d22de32007-01-18 22:04:14 -05002738 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
Divy Le Rayb8819552007-12-17 18:47:31 -08002739 if (status & SGE_FATALERR)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002740 t3_fatal_err(adapter);
2741}
2742
2743/**
2744 * sge_timer_cb - perform periodic maintenance of an SGE qset
2745 * @data: the SGE queue set to maintain
2746 *
2747 * Runs periodically from a timer to perform maintenance of an SGE queue
2748 * set. It performs two tasks:
2749 *
2750 * a) Cleans up any completed Tx descriptors that may still be pending.
2751 * Normal descriptor cleanup happens when new packets are added to a Tx
2752 * queue so this timer is relatively infrequent and does any cleanup only
2753 * if the Tx queue has not seen any new packets in a while. We make a
2754 * best effort attempt to reclaim descriptors, in that we don't wait
2755 * around if we cannot get a queue's lock (which most likely is because
2756 * someone else is queueing new packets and so will also handle the clean
2757 * up). Since control queues use immediate data exclusively we don't
2758 * bother cleaning them up here.
2759 *
2760 * b) Replenishes Rx queues that have run out due to memory shortage.
2761 * Normally new Rx buffers are added when existing ones are consumed but
2762 * when out of memory a queue can become empty. We try to add only a few
2763 * buffers here; the queue will be replenished fully as these new buffers
2764 * are used up if memory shortage has subsided.
2765 */
2766static void sge_timer_cb(unsigned long data)
2767{
2768 spinlock_t *lock;
2769 struct sge_qset *qs = (struct sge_qset *)data;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002770 struct adapter *adap = qs->adap;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002771
2772 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2773 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2774 spin_unlock(&qs->txq[TXQ_ETH].lock);
2775 }
2776 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2777 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2778 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2779 }
2780 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002781 &adap->sge.qs[0].rspq.lock;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002782 if (spin_trylock_irq(lock)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002783 if (!napi_is_scheduled(&qs->napi)) {
Divy Le Raybae73f42007-02-24 16:44:12 -08002784 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2785
Divy Le Ray4d22de32007-01-18 22:04:14 -05002786 if (qs->fl[0].credits < qs->fl[0].size)
2787 __refill_fl(adap, &qs->fl[0]);
2788 if (qs->fl[1].credits < qs->fl[1].size)
2789 __refill_fl(adap, &qs->fl[1]);
Divy Le Raybae73f42007-02-24 16:44:12 -08002790
2791 if (status & (1 << qs->rspq.cntxt_id)) {
2792 qs->rspq.starved++;
2793 if (qs->rspq.credits) {
2794 refill_rspq(adap, &qs->rspq, 1);
2795 qs->rspq.credits--;
2796 qs->rspq.restarted++;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002797 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
Divy Le Raybae73f42007-02-24 16:44:12 -08002798 1 << qs->rspq.cntxt_id);
2799 }
2800 }
Divy Le Ray4d22de32007-01-18 22:04:14 -05002801 }
2802 spin_unlock_irq(lock);
2803 }
2804 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2805}
2806
2807/**
2808 * t3_update_qset_coalesce - update coalescing settings for a queue set
2809 * @qs: the SGE queue set
2810 * @p: new queue set parameters
2811 *
2812 * Update the coalescing settings for an SGE queue set. Nothing is done
2813 * if the queue set is not initialized yet.
2814 */
2815void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2816{
Divy Le Ray4d22de32007-01-18 22:04:14 -05002817 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2818 qs->rspq.polling = p->polling;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002819 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002820}
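/*
 * Worked example (illustration only): with p->coalesce_usecs == 50 the
 * formula above programs holdoff_tmr = 500, i.e. the timer is kept in units
 * of 0.1 us (the same units as NOMEM_INTR_DELAY earlier), and a setting of
 * 0 is clamped to the minimum value of 1.
 */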
2821
2822/**
2823 * t3_sge_alloc_qset - initialize an SGE queue set
2824 * @adapter: the adapter
2825 * @id: the queue set id
2826 * @nports: how many Ethernet ports will be using this queue set
2827 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2828 * @p: configuration parameters for this queue set
2829 * @ntxq: number of Tx queues for the queue set
2830 * @dev: net device associated with this queue set
2831 *
2832 * Allocate resources and initialize an SGE queue set. A queue set
2833 * comprises a response queue, two Rx free-buffer queues, and up to 3
2834 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2835 * queue, offload queue, and control queue.
2836 */
2837int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2838 int irq_vec_idx, const struct qset_params *p,
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002839 int ntxq, struct net_device *dev)
Divy Le Ray4d22de32007-01-18 22:04:14 -05002840{
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07002841 int i, avail, ret = -ENOMEM;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002842 struct sge_qset *q = &adapter->sge.qs[id];
Divy Le Rayb47385b2008-05-21 18:56:26 -07002843 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002844
2845 init_qset_cntxt(q, id);
2846 init_timer(&q->tx_reclaim_timer);
2847 q->tx_reclaim_timer.data = (unsigned long)q;
2848 q->tx_reclaim_timer.function = sge_timer_cb;
2849
2850 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2851 sizeof(struct rx_desc),
2852 sizeof(struct rx_sw_desc),
2853 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2854 if (!q->fl[0].desc)
2855 goto err;
2856
2857 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2858 sizeof(struct rx_desc),
2859 sizeof(struct rx_sw_desc),
2860 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2861 if (!q->fl[1].desc)
2862 goto err;
2863
2864 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2865 sizeof(struct rsp_desc), 0,
2866 &q->rspq.phys_addr, NULL);
2867 if (!q->rspq.desc)
2868 goto err;
2869
2870 for (i = 0; i < ntxq; ++i) {
2871 /*
2872 * The control queue always uses immediate data so does not
2873 * need to keep track of any sk_buffs.
2874 */
2875 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2876
2877 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2878 sizeof(struct tx_desc), sz,
2879 &q->txq[i].phys_addr,
2880 &q->txq[i].sdesc);
2881 if (!q->txq[i].desc)
2882 goto err;
2883
2884 q->txq[i].gen = 1;
2885 q->txq[i].size = p->txq_size[i];
2886 spin_lock_init(&q->txq[i].lock);
2887 skb_queue_head_init(&q->txq[i].sendq);
2888 }
2889
2890 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2891 (unsigned long)q);
2892 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2893 (unsigned long)q);
2894
2895 q->fl[0].gen = q->fl[1].gen = 1;
2896 q->fl[0].size = p->fl_size;
2897 q->fl[1].size = p->jumbo_size;
2898
2899 q->rspq.gen = 1;
2900 q->rspq.size = p->rspq_size;
2901 spin_lock_init(&q->rspq.lock);
David S. Miller147e70e2008-09-22 01:29:52 -07002902 skb_queue_head_init(&q->rspq.rx_queue);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002903
2904 q->txq[TXQ_ETH].stop_thres = nports *
2905 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2906
Divy Le Raycf992af2007-05-30 21:10:47 -07002907#if FL0_PG_CHUNK_SIZE > 0
2908 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
Divy Le Raye0994eb2007-02-24 16:44:17 -08002909#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002910 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
Divy Le Raye0994eb2007-02-24 16:44:17 -08002911#endif
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002912#if FL1_PG_CHUNK_SIZE > 0
2913 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2914#else
Divy Le Raycf992af2007-05-30 21:10:47 -07002915 q->fl[1].buf_size = is_offload(adapter) ?
2916 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2917 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002918#endif
2919
2920 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2921 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2922 q->fl[0].order = FL0_PG_ORDER;
2923 q->fl[1].order = FL1_PG_ORDER;
Divy Le Ray4d22de32007-01-18 22:04:14 -05002924
Divy Le Rayb47385b2008-05-21 18:56:26 -07002925 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2926 sizeof(struct skb_frag_struct),
2927 GFP_KERNEL);
2928 q->lro_nfrags = q->lro_frag_len = 0;
Roland Dreierb1186de2008-03-20 13:30:48 -07002929 spin_lock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002930
2931 /* FL threshold comparison uses < */
2932 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2933 q->rspq.phys_addr, q->rspq.size,
2934 q->fl[0].buf_size, 1, 0);
2935 if (ret)
2936 goto err_unlock;
2937
2938 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2939 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2940 q->fl[i].phys_addr, q->fl[i].size,
2941 q->fl[i].buf_size, p->cong_thres, 1,
2942 0);
2943 if (ret)
2944 goto err_unlock;
2945 }
2946
2947 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2948 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2949 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2950 1, 0);
2951 if (ret)
2952 goto err_unlock;
2953
2954 if (ntxq > 1) {
2955 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2956 USE_GTS, SGE_CNTXT_OFLD, id,
2957 q->txq[TXQ_OFLD].phys_addr,
2958 q->txq[TXQ_OFLD].size, 0, 1, 0);
2959 if (ret)
2960 goto err_unlock;
2961 }
2962
2963 if (ntxq > 2) {
2964 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2965 SGE_CNTXT_CTRL, id,
2966 q->txq[TXQ_CTRL].phys_addr,
2967 q->txq[TXQ_CTRL].size,
2968 q->txq[TXQ_CTRL].token, 1, 0);
2969 if (ret)
2970 goto err_unlock;
2971 }
2972
Roland Dreierb1186de2008-03-20 13:30:48 -07002973 spin_unlock_irq(&adapter->sge.reg_lock);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002974
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002975 q->adap = adapter;
2976 q->netdev = dev;
2977 t3_update_qset_coalesce(q, p);
Divy Le Rayb47385b2008-05-21 18:56:26 -07002978
2979 init_lro_mgr(q, lro_mgr);
2980
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002981 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
2982 GFP_KERNEL | __GFP_COMP);
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07002983 if (!avail) {
2984 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
2985 goto err;
2986 }
2987 if (avail < q->fl[0].size)
2988 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
2989 avail);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002990
Divy Le Ray7385ecf2008-05-21 18:56:21 -07002991 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
2992 GFP_KERNEL | __GFP_COMP);
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07002993 if (avail < q->fl[1].size)
2994 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
2995 avail);
Divy Le Ray4d22de32007-01-18 22:04:14 -05002996 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2997
2998 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2999 V_NEWTIMER(q->rspq.holdoff_tmr));
3000
3001 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3002 return 0;
3003
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07003004err_unlock:
Roland Dreierb1186de2008-03-20 13:30:48 -07003005 spin_unlock_irq(&adapter->sge.reg_lock);
Divy Le Rayb1fb1f22008-05-21 18:56:16 -07003006err:
Divy Le Ray4d22de32007-01-18 22:04:14 -05003007 t3_free_qset(adapter, q);
3008 return ret;
3009}

/**
 *	t3_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 *	t3_sge_start - enable SGE
 *	@adap: the adapter
 *
 *	Enables the SGE for DMAs.  This is the last step in starting packet
 *	transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 *	t3_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait; however, the tasklets will have no effect
 *	since the doorbells are disabled, and the driver will call this again
 *	later from process context, at which time the tasklets will be
 *	stopped if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}
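
/*
 * Illustrative sketch only, compiled out: one plausible shutdown order built
 * from the two routines above.  It assumes the caller has already quiesced
 * the upper layers; example_sge_teardown() is not part of the driver.
 */
#if 0
static void example_sge_teardown(struct adapter *adap)
{
	t3_sge_stop(adap);		/* disable DMA; from process context
					 * this also kills the restart tasklets
					 */
	t3_free_sge_resources(adap);	/* release every SGE queue set */
}
#endif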

/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here; instead, the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here; that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
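
/*
 * Illustrative sketch only, compiled out: the bring-up order implied by the
 * kernel-doc above.  t3_sge_init() is re-run after every chip reset, the
 * queue sets are then allocated by the higher-level driver code, and DMA is
 * enabled last via t3_sge_start().  example_sge_bring_up() and its 'p'
 * argument are hypothetical stand-ins for the driver's probe/reset paths.
 */
#if 0
static void example_sge_bring_up(struct adapter *adap, struct sge_params *p)
{
	t3_sge_init(adap, p);	/* per-reset SGE register setup */

	/* ... the driver top-level allocates its queue sets here ... */

	t3_sge_start(adap);	/* enable DMA: the last step */
}
#endif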

/**
 *	t3_sge_prep - one-time SGE initialization
 *	@adap: the associated adapter
 *	@p: SGE parameters
 *
 *	Performs one-time initialization of SGE SW state.  Includes determining
 *	defaults for the assorted SGE parameters, which admins can change until
 *	they are used to initialize the SGE.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}
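
/*
 * Illustrative sketch only, compiled out: t3_sge_prep() merely records
 * defaults, so they may be adjusted per queue set any time before the queue
 * sets are created.  example_tune_qset0() and the values it writes are
 * arbitrary examples, not driver policy.
 */
#if 0
static void example_tune_qset0(struct sge_params *p)
{
	struct qset_params *q = &p->qset[0];

	q->coalesce_usecs = 10;	/* coalesce interrupts a little longer */
	q->rspq_size = 2048;	/* double the default response queue size */
}
#endif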

/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor, or -EINVAL if the queue or index is invalid.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
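
/*
 * Illustrative sketch only, compiled out: reading back the first descriptor
 * of Tx queue 0 (qnum 0..2 selects a Tx queue per the mapping documented
 * above).  The destination buffer must be large enough for the descriptor
 * type being read; t3_get_desc() returns the number of bytes copied or
 * -EINVAL.  example_dump_tx_desc0() is not part of the driver.
 */
#if 0
static int example_dump_tx_desc0(const struct sge_qset *qs)
{
	unsigned char buf[sizeof(struct tx_desc)];
	int len = t3_get_desc(qs, 0, 0, buf);

	if (len < 0)
		return len;	/* queue not initialized or index out of range */

	print_hex_dump_bytes("tx_desc: ", DUMP_PREFIX_OFFSET, buf, len);
	return 0;
}
#endif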