/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"

/*
 * Constants ...
 */
enum {
        /*
         * Egress Queue sizes, producer and consumer indices are all in units
         * of Egress Context Units bytes.  Note that as far as the hardware is
         * concerned, the free list is an Egress Queue (the host produces free
         * buffers which the hardware consumes) and free list entries are
         * 64-bit PCI DMA addresses.
         */
        EQ_UNIT = SGE_EQ_IDXSIZE,
        FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
        TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

        /*
         * Max number of TX descriptors we clean up at a time.  Should be
         * modest as freeing skbs isn't cheap and it happens while holding
         * locks.  We just need to free packets faster than they arrive; we
         * eventually catch up and keep the amortized cost reasonable.
         */
        MAX_TX_RECLAIM = 16,

        /*
         * Max number of Rx buffers we replenish at a time.  Again, keep this
         * modest since allocating buffers isn't cheap either.
         */
        MAX_RX_REFILL = 16,

        /*
         * Period of the Rx queue check timer.  This timer is infrequent as it
         * has something to do only when the system experiences severe memory
         * shortage.
         */
        RX_QCHECK_PERIOD = (HZ / 2),

        /*
         * Period of the TX queue check timer and the maximum number of TX
         * descriptors to be reclaimed by the TX timer.
         */
        TX_QCHECK_PERIOD = (HZ / 2),
        MAX_TIMER_TX_RECLAIM = 100,

        /*
         * Suspend an Ethernet TX queue with fewer available descriptors than
         * this.  We always want to have room for a maximum sized packet:
         * inline immediate data + MAX_SKB_FRAGS.  This is the same as
         * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
         * (see that function and its helpers for a description of the
         * calculation).
         */
        ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
        ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
                              ((ETHTXQ_MAX_FRAGS-1) & 1) +
                              2),
        ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
                          sizeof(struct cpl_tx_pkt_lso_core) +
                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
        ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

        ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

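        /*
         * Illustrative arithmetic for the sizing above (an assumption-laden
         * example, not a definitive value: it takes a 4KB-page system where
         * MAX_SKB_FRAGS is 17 and EQ_UNIT is 64 bytes; the real numbers
         * follow the structure sizes on the build in question):
         *
         *   ETHTXQ_MAX_FRAGS   = 17 + 1               = 18 SGL entries
         *   ETHTXQ_MAX_SGL_LEN = (3*17)/2 + (17&1) + 2 = 28 flits
         *   TXD_PER_EQ_UNIT    = 64 / 8                = 8 flits/descriptor
         *
         * so with an ETHTXQ_MAX_HDR of, say, 9 flits, ETHTXQ_MAX_FLITS is 37
         * and ETHTXQ_STOP_THRES is 1 + DIV_ROUND_UP(37, 8) = 6 descriptors,
         * i.e. we stop the queue while one worst-case packet still fits.
         */
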
        /*
         * Max TX descriptor space we allow for an Ethernet packet to be
         * inlined into a WR.  This is limited by the maximum value which
         * we can specify for immediate data in the firmware Ethernet TX
         * Work Request.
         */
        MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

        /*
         * Max size of a WR sent through a control TX queue.
         */
        MAX_CTRL_WR_LEN = 256,

        /*
         * Maximum amount of data which we'll ever need to inline into a
         * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
         */
        MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
                          ? MAX_IMM_TX_PKT_LEN
                          : MAX_CTRL_WR_LEN),

        /*
         * For incoming packets less than RX_COPY_THRES, we copy the data into
         * an skb rather than referencing the data.  We allocate enough
         * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
         * of the data (header).
         */
        RX_COPY_THRES = 256,
        RX_PULL_LEN = 128,

        /*
         * Main body length for sk_buffs used for RX Ethernet packets with
         * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
         * pskb_may_pull() some room.
         */
        RX_SKB_LEN = 512,
};

/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
        struct sk_buff *skb;            /* socket buffer of TX data source */
        struct ulptx_sgl *sgl;          /* scatter/gather list in TX Queue */
};

/*
 * Software state per RX Free List descriptor.  We keep track of the allocated
 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 * page size and its PCI DMA mapped state are stored in the low bits of the
 * PCI DMA address as per below.
 */
struct rx_sw_desc {
        struct page *page;              /* Free List page buffer */
        dma_addr_t dma_addr;            /* PCI DMA address (if mapped) */
                                        /* and flags (see below) */
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 * maintained in an inverse sense so the hardware never sees that bit high.
 */
enum {
        RX_LARGE_BUF    = 1 << 0,       /* buffer is SGE_FL_BUFFER_SIZE[1] */
        RX_UNMAPPED_BUF = 1 << 1,       /* buffer is not mapped */
};
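
/*
 * Example of the encoding above (illustrative address only): a mapped "large"
 * Free List page at bus address 0x7f3a40000 is stored -- and handed to the
 * SGE -- as 0x7f3a40000 | RX_LARGE_BUF, so the SGE sees buffer-size index 1
 * in the low bits.  The RX_UNMAPPED_BUF bit lives only in the software copy
 * of the address; get_buf_addr() strips it and, per the comment above, it is
 * never allowed to reach the hardware Free List.
 */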

/**
 * get_buf_addr - return DMA buffer address of software descriptor
 * @sdesc: pointer to the software buffer descriptor
 *
 * Return the DMA buffer address of a software descriptor (stripping out
 * our low-order flag bits).
 */
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
{
        return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

/**
 * is_buf_mapped - is buffer mapped for DMA?
 * @sdesc: pointer to the software buffer descriptor
 *
 * Determine whether the buffer associated with a software descriptor is
 * mapped for DMA or not.
 */
static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
{
        return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
        return 1;
#else
        return 0;
#endif
}

/**
 * txq_avail - return the number of available slots in a TX queue
 * @tq: the TX queue
 *
 * Returns the number of available descriptors in a TX queue.
 */
static inline unsigned int txq_avail(const struct sge_txq *tq)
{
        return tq->size - 1 - tq->in_use;
}
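
/*
 * Note: the "- 1" above keeps one descriptor permanently unused so that a
 * completely full ring can never look identical to a completely empty one
 * (producer index == consumer index).  The Free List applies the same idea
 * below, but in whole Egress Queue Units rather than single descriptors.
 */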

/**
 * fl_cap - return the capacity of a Free List
 * @fl: the Free List
 *
 * Returns the capacity of a Free List.  The capacity is less than the
 * size because an Egress Queue Index Unit worth of descriptors needs to
 * be left unpopulated, otherwise the Producer and Consumer indices PIDX
 * and CIDX will match and the hardware will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
        return fl->size - FL_PER_EQ_UNIT;
}
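
/*
 * For example (illustrative numbers): with EQ_UNIT == 64 bytes and 8-byte
 * Free List entries, FL_PER_EQ_UNIT is 8, so a Free List created with 1024
 * hardware entries has a usable capacity of 1016 buffers.
 */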

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
                               const struct sge_fl *fl)
{
        const struct sge *s = &adapter->sge;

        return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

/**
 * map_skb - map an skb for DMA to the device
 * @dev: the egress net device
 * @skb: the packet to map
 * @addr: a pointer to the base of the DMA mapping array
 *
 * Map an skb for DMA to the device and return an array of DMA addresses.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
                   dma_addr_t *addr)
{
        const skb_frag_t *fp, *end;
        const struct skb_shared_info *si;

        *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *addr))
                goto out_err;

        si = skb_shinfo(skb);
        end = &si->frags[si->nr_frags];
        for (fp = si->frags; fp < end; fp++) {
                *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, *addr))
                        goto unwind;
        }
        return 0;

unwind:
        while (fp-- > si->frags)
                dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
        dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
        return -ENOMEM;
}

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
                      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
        const struct ulptx_sge_pair *p;
        unsigned int nfrags = skb_shinfo(skb)->nr_frags;

        if (likely(skb_headlen(skb)))
                dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
                                 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
        else {
                dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
                               be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
                nfrags--;
        }

        /*
         * the complexity below is because of the possibility of a wrap-around
         * in the middle of an SGL
         */
        for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
                if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
unmap:
                        dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
                                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
                        dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
                                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
                        p++;
                } else if ((u8 *)p == (u8 *)tq->stat) {
                        p = (const struct ulptx_sge_pair *)tq->desc;
                        goto unmap;
                } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
                        const __be64 *addr = (const __be64 *)tq->desc;

                        dma_unmap_page(dev, be64_to_cpu(addr[0]),
                                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
                        dma_unmap_page(dev, be64_to_cpu(addr[1]),
                                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
                        p = (const struct ulptx_sge_pair *)&addr[2];
                } else {
                        const __be64 *addr = (const __be64 *)tq->desc;

                        dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
                                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
                        dma_unmap_page(dev, be64_to_cpu(addr[0]),
                                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
                        p = (const struct ulptx_sge_pair *)&addr[1];
                }
        }
        if (nfrags) {
                __be64 addr;

                if ((u8 *)p == (u8 *)tq->stat)
                        p = (const struct ulptx_sge_pair *)tq->desc;
                addr = ((u8 *)p + 16 <= (u8 *)tq->stat
                        ? p->addr[0]
                        : *(const __be64 *)tq->desc);
                dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
                               DMA_TO_DEVICE);
        }
}

/**
 * free_tx_desc - reclaims TX descriptors and their buffers
 * @adapter: the adapter
 * @tq: the TX queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims TX descriptors from an SGE TX queue and frees the associated
 * TX buffers.  Called with the TX queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
                         unsigned int n, bool unmap)
{
        struct tx_sw_desc *sdesc;
        unsigned int cidx = tq->cidx;
        struct device *dev = adapter->pdev_dev;

        const int need_unmap = need_skb_unmap() && unmap;

        sdesc = &tq->sdesc[cidx];
        while (n--) {
                /*
                 * If we kept a reference to the original TX skb, we need to
                 * unmap it from PCI DMA space (if required) and free it.
                 */
                if (sdesc->skb) {
                        if (need_unmap)
                                unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
                        dev_consume_skb_any(sdesc->skb);
                        sdesc->skb = NULL;
                }

                sdesc++;
                if (++cidx == tq->size) {
                        cidx = 0;
                        sdesc = tq->sdesc;
                }
        }
        tq->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a TX queue.
 */
static inline int reclaimable(const struct sge_txq *tq)
{
        int hw_cidx = be16_to_cpu(tq->stat->cidx);
        int reclaimable = hw_cidx - tq->cidx;

        if (reclaimable < 0)
                reclaimable += tq->size;
        return reclaimable;
}
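
/*
 * Example of the wrap-around handling above (illustrative numbers): with a
 * 1024-entry ring, a hardware consumer index of 3 and a software consumer
 * index of 1020, hw_cidx - tq->cidx is -1017; adding the ring size yields
 * the 7 descriptors (1020..1023 and 0..2) that have completed since we
 * last looked.
 */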

/**
 * reclaim_completed_tx - reclaims completed TX descriptors
 * @adapter: the adapter
 * @tq: the TX queue to reclaim completed descriptors from
 * @unmap: whether the buffers should be unmapped for DMA
 *
 * Reclaims TX descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the TX
 * queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
                                        struct sge_txq *tq,
                                        bool unmap)
{
        int avail = reclaimable(tq);

        if (avail) {
                /*
                 * Limit the amount of clean up work we do at a time to keep
                 * the TX lock hold time O(1).
                 */
                if (avail > MAX_TX_RECLAIM)
                        avail = MAX_TX_RECLAIM;

                free_tx_desc(adapter, tq, avail, unmap);
                tq->in_use -= avail;
        }
}

/**
 * get_buf_size - return the size of an RX Free List buffer.
 * @adapter: pointer to the associated adapter
 * @sdesc: pointer to the software buffer descriptor
 */
static inline int get_buf_size(const struct adapter *adapter,
                               const struct rx_sw_desc *sdesc)
{
        const struct sge *s = &adapter->sge;

        return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
                ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}

/**
 * free_rx_bufs - free RX buffers on an SGE Free List
 * @adapter: the adapter
 * @fl: the SGE Free List to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE Free List RX queue.  The
 * buffers must be made inaccessible to hardware before calling this
 * function.
 */
static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
{
        while (n--) {
                struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

                if (is_buf_mapped(sdesc))
                        dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
                                       get_buf_size(adapter, sdesc),
                                       PCI_DMA_FROMDEVICE);
                put_page(sdesc->page);
                sdesc->page = NULL;
                if (++fl->cidx == fl->size)
                        fl->cidx = 0;
                fl->avail--;
        }
}

/**
 * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
 * @adapter: the adapter
 * @fl: the SGE Free List
 *
 * Unmap the current buffer on an SGE Free List RX queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 * This is used predominantly to "transfer ownership" of an FL buffer
 * to another entity (typically an skb's fragment list).
 */
static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
{
        struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

        if (is_buf_mapped(sdesc))
                dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
                               get_buf_size(adapter, sdesc),
                               PCI_DMA_FROMDEVICE);
        sdesc->page = NULL;
        if (++fl->cidx == fl->size)
                fl->cidx = 0;
        fl->avail--;
}

/**
 * ring_fl_db - ring doorbell on free list
 * @adapter: the adapter
 * @fl: the Free List whose doorbell should be rung ...
 *
 * Tell the Scatter Gather Engine that there are new free list entries
 * available.
 */
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
        u32 val;

        /* The SGE keeps track of its Producer and Consumer Indices in terms
         * of Egress Queue Units so we can only tell it about integral
         * multiples of Free List Entries per Egress Queue Unit ...
         */
        if (fl->pend_cred >= FL_PER_EQ_UNIT) {
                if (is_t4(adapter->params.chip))
                        val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
                else
                        val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) |
                              DBTYPE(1);
                val |= DBPRIO(1);

                /* Make sure all memory writes to the Free List queue are
                 * committed before we tell the hardware about them.
                 */
                wmb();

                /* If we don't have access to the new User Doorbell (T5+), use
                 * the old doorbell mechanism; otherwise use the new BAR2
                 * mechanism.
                 */
                if (unlikely(fl->bar2_addr == NULL)) {
                        t4_write_reg(adapter,
                                     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
                                     QID(fl->cntxt_id) | val);
                } else {
                        writel(val | QID(fl->bar2_qid),
                               fl->bar2_addr + SGE_UDB_KDOORBELL);

                        /* This Write memory Barrier will force the write to
                         * the User Doorbell area to be flushed.
                         */
                        wmb();
                }
                fl->pend_cred %= FL_PER_EQ_UNIT;
        }
}
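
/*
 * Worked example (illustrative): with FL_PER_EQ_UNIT == 8, a driver that has
 * replenished 35 buffers since the last doorbell rings PIDX (or PIDX_T5) with
 * an increment of 35 / 8 = 4 Egress Queue Units and leaves the remaining
 * 35 % 8 = 3 buffers in fl->pend_cred for the next ring.
 */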

/**
 * set_rx_sw_desc - initialize software RX buffer descriptor
 * @sdesc: pointer to the software RX buffer descriptor
 * @page: pointer to the page data structure backing the RX buffer
 * @dma_addr: PCI DMA address (possibly with low-bit flags)
 */
static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
                                  dma_addr_t dma_addr)
{
        sdesc->page = page;
        sdesc->dma_addr = dma_addr;
}

/*
 * Support for poisoning RX buffers ...
 */
#define POISON_BUF_VAL -1

static inline void poison_buf(struct page *page, size_t sz)
{
#if POISON_BUF_VAL >= 0
        memset(page_address(page), POISON_BUF_VAL, sz);
#endif
}

/**
 * refill_fl - refill an SGE RX buffer ring
 * @adapter: the adapter
 * @fl: the Free List ring to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for the allocations
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
 * EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
 * of buffers allocated.  If afterwards the queue is found critically low,
 * mark it as starving in the bitmap of starving FLs.
 */
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                              int n, gfp_t gfp)
{
        struct sge *s = &adapter->sge;
        struct page *page;
        dma_addr_t dma_addr;
        unsigned int cred = fl->avail;
        __be64 *d = &fl->desc[fl->pidx];
        struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];

        /*
         * Sanity: ensure that the result of adding n Free List buffers
         * won't result in wrapping the SGE's Producer Index around to
         * its Consumer Index thereby indicating an empty Free List ...
         */
        BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);

        gfp |= __GFP_NOWARN;

        /*
         * If we support large pages, prefer large buffers and fail over to
         * small pages if we can't allocate large pages to satisfy the refill.
         * If we don't support large pages, drop directly into the small page
         * allocation code.
         */
        if (s->fl_pg_order == 0)
                goto alloc_small_pages;

        while (n) {
                page = __dev_alloc_pages(gfp, s->fl_pg_order);
                if (unlikely(!page)) {
                        /*
                         * We've failed in our attempt to allocate a "large
                         * page".  Fail over to the "small page" allocation
                         * below.
                         */
                        fl->large_alloc_failed++;
                        break;
                }
                poison_buf(page, PAGE_SIZE << s->fl_pg_order);

                dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
                                        PAGE_SIZE << s->fl_pg_order,
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                        /*
                         * We've run out of DMA mapping space.  Free up the
                         * buffer and return with what we've managed to put
                         * into the free list.  We don't want to fail over to
                         * the small page allocation below in this case
                         * because DMA mapping resources are typically
                         * critical resources once they become scarce.
                         */
                        __free_pages(page, s->fl_pg_order);
                        goto out;
                }
                dma_addr |= RX_LARGE_BUF;
                *d++ = cpu_to_be64(dma_addr);

                set_rx_sw_desc(sdesc, page, dma_addr);
                sdesc++;

                fl->avail++;
                if (++fl->pidx == fl->size) {
                        fl->pidx = 0;
                        sdesc = fl->sdesc;
                        d = fl->desc;
                }
                n--;
        }

alloc_small_pages:
        while (n--) {
                page = __dev_alloc_page(gfp);
                if (unlikely(!page)) {
                        fl->alloc_failed++;
                        break;
                }
                poison_buf(page, PAGE_SIZE);

                dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
                                        PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                        put_page(page);
                        break;
                }
                *d++ = cpu_to_be64(dma_addr);

                set_rx_sw_desc(sdesc, page, dma_addr);
                sdesc++;

                fl->avail++;
                if (++fl->pidx == fl->size) {
                        fl->pidx = 0;
                        sdesc = fl->sdesc;
                        d = fl->desc;
                }
        }

out:
        /*
         * Update our accounting state to incorporate the new Free List
         * buffers, tell the hardware about them and return the number of
         * buffers which we were able to allocate.
         */
        cred = fl->avail - cred;
        fl->pend_cred += cred;
        ring_fl_db(adapter, fl);

        if (unlikely(fl_starving(adapter, fl))) {
                smp_wmb();
                set_bit(fl->cntxt_id, adapter->sge.starving_fl);
        }

        return cred;
}

/*
 * Refill a Free List to its capacity or the Maximum Refill Increment,
 * whichever is smaller ...
 */
static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
{
        refill_fl(adapter, fl,
                  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
                  GFP_ATOMIC);
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @hwsize: the size of each hardware descriptor
 * @swsize: the size of each software descriptor
 * @busaddrp: the physical PCI bus address of the allocated ring
 * @swringp: return address pointer for software ring
 * @stat_size: extra space in hardware ring for status information
 *
 * Allocates resources for an SGE descriptor ring, such as TX queues,
 * free buffer lists, response queues, etc.  Each SGE ring requires
 * space for its hardware descriptors plus, optionally, space for software
 * state associated with each hardware entry (the metadata).  The function
 * returns three values: the virtual address for the hardware ring (the
 * return value of the function), the PCI bus address of the hardware
 * ring (in *busaddrp), and the address of the software ring (in swringp).
 * Both the hardware and software rings are returned zeroed out.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
                        size_t swsize, dma_addr_t *busaddrp, void *swringp,
                        size_t stat_size)
{
        /*
         * Allocate the hardware ring and PCI DMA bus address space for said.
         */
        size_t hwlen = nelem * hwsize + stat_size;
        void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);

        if (!hwring)
                return NULL;

        /*
         * If the caller wants a software ring, allocate it and return a
         * pointer to it in *swringp.
         */
        BUG_ON((swsize != 0) != (swringp != NULL));
        if (swsize) {
                void *swring = kcalloc(nelem, swsize, GFP_KERNEL);

                if (!swring) {
                        dma_free_coherent(dev, hwlen, hwring, *busaddrp);
                        return NULL;
                }
                *(void **)swringp = swring;
        }

        /*
         * Zero out the hardware ring and return its address as our function
         * value.
         */
        memset(hwring, 0, hwlen);
        return hwring;
}
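
/*
 * Typical use of alloc_ring() (a hedged sketch, not a verbatim call site from
 * this driver; the exact field and constant names are assumptions): a TX
 * queue with "size" descriptors, one struct tx_sw_desc of software state per
 * descriptor, and a trailing status block might be allocated as
 *
 *      txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
 *                               sizeof(struct tx_desc),
 *                               sizeof(struct tx_sw_desc),
 *                               &txq->q.phys_addr, &txq->q.sdesc,
 *                               stat_len);
 *
 * after which txq->q.desc holds the zeroed hardware ring, txq->q.phys_addr
 * the bus address handed to the firmware, and txq->q.sdesc the matching
 * software descriptor array.
 */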

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits (8-byte units) needed for a Direct
 * Scatter/Gather List that can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        /*
         * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
         * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
         * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
         * repeated sequences of { Length[i], Length[i+1], Address[i],
         * Address[i+1] } (this ensures that all addresses are on 64-bit
         * boundaries).  If N is even, then Length[N+1] should be set to 0 and
         * Address[N+1] is omitted.
         *
         * The following calculation incorporates all of the above.  It's
         * somewhat hard to follow but, briefly: the "+2" accounts for the
         * first two flits which include the DSGL header, Length0 and
         * Address0; the "(3*(n-1))/2" covers the main body of list entries
         * (3 flits for every pair of the remaining N); and finally the
         * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
         */
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}
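
/*
 * For example: sgl_len(1) = 2 flits (DSGL header/Length0 plus Address0),
 * sgl_len(2) = 4 (the unused second length slot of the pair is still written
 * as 0), sgl_len(3) = 5, and sgl_len(18) -- the ETHTXQ_MAX_FRAGS worst case
 * with 4KB pages -- is (3*17)/2 + 1 + 2 = 28 flits.
 */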

/**
 * flits_to_desc - returns the num of TX descriptors for the given flits
 * @flits: the number of flits
 *
 * Returns the number of TX descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int flits)
{
        BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
        return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
}

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit completely as
 * immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
        /*
         * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
         * which does not accommodate immediate data.  We could dike out all
         * of the support code for immediate data but that would tie our hands
         * too much if we ever want to enhance the firmware.  It would also
         * create more differences between the PF and VF Drivers.
         */
        return false;
}

/**
 * calc_tx_flits - calculate the number of flits for a packet TX WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a TX Work Request for the
 * given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
        unsigned int flits;

        /*
         * If the skb is small enough, we can pump it out as a work request
         * with only immediate data.  In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */
        if (is_eth_imm(skb))
                return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
                                    sizeof(__be64));

        /*
         * Otherwise, we're going to have to construct a Scatter/Gather List
         * of the skb body and fragments.  We also include the flits necessary
         * for the TX Packet Work Request and CPL.  We always have a firmware
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
        if (skb_shinfo(skb)->gso_size)
                flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
                          sizeof(struct cpl_tx_pkt_lso_core) +
                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
        else
                flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
        return flits;
}
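
/*
 * Rough example (the structure sizes here are illustrative assumptions): a
 * non-TSO packet with a linear header and two page fragments needs
 * sgl_len(3) = 5 flits for the DSGL plus (sizeof(wr) + sizeof(cpl)) / 8 --
 * about 7 flits with a 40-byte VM Work Request and 16-byte TX Packet CPL --
 * for roughly 12 flits, i.e. two 8-flit TX descriptors after flits_to_desc().
 */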

/**
 * write_sgl - populate a Scatter/Gather List for a packet
 * @skb: the packet
 * @tq: the TX queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into skb main-body data to include in the SGL
 * @addr: the list of DMA bus addresses for the SGL elements
 *
 * Generates a Scatter/Gather List for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a TX descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @tq->stat.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
                      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
                      const dma_addr_t *addr)
{
        unsigned int i, len;
        struct ulptx_sge_pair *to;
        const struct skb_shared_info *si = skb_shinfo(skb);
        unsigned int nfrags = si->nr_frags;
        struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

        len = skb_headlen(skb) - start;
        if (likely(len)) {
                sgl->len0 = htonl(len);
                sgl->addr0 = cpu_to_be64(addr[0] + start);
                nfrags++;
        } else {
                sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
                sgl->addr0 = cpu_to_be64(addr[1]);
        }

        sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                              ULPTX_NSGE(nfrags));
        if (likely(--nfrags == 0))
                return;
        /*
         * Most of the complexity below deals with the possibility we hit the
         * end of the queue in the middle of writing the SGL.  For this case
         * only we create the SGL in a temporary buffer and then copy it.
         */
        to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;

        for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
                to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
                to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
                to->addr[0] = cpu_to_be64(addr[i]);
                to->addr[1] = cpu_to_be64(addr[++i]);
        }
        if (nfrags) {
                to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
                to->len[1] = cpu_to_be32(0);
                to->addr[0] = cpu_to_be64(addr[i + 1]);
        }
        if (unlikely((u8 *)end > (u8 *)tq->stat)) {
                unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;

                if (likely(part0))
                        memcpy(sgl->sge, buf, part0);
                part1 = (u8 *)end - (u8 *)tq->stat;
                memcpy(tq->desc, (u8 *)buf + part0, part1);
                end = (void *)tq->desc + part1;
        }
        if ((uintptr_t)end & 8)         /* 0-pad to multiple of 16 */
                *end = 0;
}

/**
 * ring_tx_db - check and potentially ring a TX queue's doorbell
 * @adapter: the adapter
 * @tq: the TX queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a TX queue.
 */
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                              int n)
{
        /* Make sure that all writes to the TX Descriptors are committed
         * before we tell the hardware about them.
         */
        wmb();

        /* If we don't have access to the new User Doorbell (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(tq->bar2_addr == NULL)) {
                u32 val = PIDX(n);

                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
                             QID(tq->cntxt_id) | val);
        } else {
                u32 val = PIDX_T5(n);

                /* T4 and later chips share the same PIDX field offset within
                 * the doorbell, but T5 and later shrank the field in order to
                 * gain a bit for Doorbell Priority.  The field was absurdly
                 * large in the first place (14 bits) so we just use the T5
                 * and later limits and warn if a Queue ID is too large.
                 */
                WARN_ON(val & DBPRIO(1));

                /* If we're only writing a single Egress Unit and the BAR2
                 * Queue ID is 0, we can use the Write Combining Doorbell
                 * Gather Buffer; otherwise we use the simple doorbell.
                 */
                if (n == 1 && tq->bar2_qid == 0) {
                        unsigned int index = (tq->pidx
                                              ? (tq->pidx - 1)
                                              : (tq->size - 1));
                        __be64 *src = (__be64 *)&tq->desc[index];
                        __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
                                                         SGE_UDB_WCDOORBELL);
                        unsigned int count = EQ_UNIT / sizeof(__be64);

                        /* Copy the TX Descriptor in a tight loop in order to
                         * try to get it to the adapter in a single Write
                         * Combined transfer on the PCI-E Bus.  If the Write
                         * Combine fails (say because of an interrupt, etc.)
                         * the hardware will simply take the last write as a
                         * simple doorbell write with a PIDX Increment of 1
                         * and will fetch the TX Descriptor from memory via
                         * DMA.
                         */
                        while (count) {
                                writeq(*src, dst);
                                src++;
                                dst++;
                                count--;
                        }
                } else
                        writel(val | QID(tq->bar2_qid),
                               tq->bar2_addr + SGE_UDB_KDOORBELL);

                /* This Write Memory Barrier will force the write to the User
                 * Doorbell area to be flushed.  This is needed to prevent
                 * writes on different CPUs for the same queue from hitting
                 * the adapter out of order.  This is required when some Work
                 * Requests take the Write Combine Gather Buffer path (user
                 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
                 * take the traditional path where we simply increment the
                 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
                 * hardware DMA read the actual Work Request.
                 */
                wmb();
        }
}

/**
 * inline_tx_skb - inline a packet's data into TX descriptors
 * @skb: the packet
 * @tq: the TX queue where the packet will be inlined
 * @pos: starting position in the TX queue to inline the packet
 *
 * Inline a packet's contents directly into TX descriptors, starting at
 * the given position within the TX DMA ring.
 * Most of the complexity of this operation is dealing with wrap arounds
 * in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
                          void *pos)
{
        u64 *p;
        int left = (void *)tq->stat - pos;

        if (likely(skb->len <= left)) {
                if (likely(!skb->data_len))
                        skb_copy_from_linear_data(skb, pos, skb->len);
                else
                        skb_copy_bits(skb, 0, pos, skb->len);
                pos += skb->len;
        } else {
                skb_copy_bits(skb, 0, pos, left);
                skb_copy_bits(skb, left, tq->desc, skb->len - left);
                pos = (void *)tq->desc + (skb->len - left);
        }

        /* 0-pad to multiple of 16 */
        p = PTR_ALIGN(pos, 8);
        if ((uintptr_t)p & 8)
                *p = 0;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
        int csum_type;
        const struct iphdr *iph = ip_hdr(skb);

        if (iph->version == 4) {
                if (iph->protocol == IPPROTO_TCP)
                        csum_type = TX_CSUM_TCPIP;
                else if (iph->protocol == IPPROTO_UDP)
                        csum_type = TX_CSUM_UDPIP;
                else {
nocsum:
                        /*
                         * unknown protocol, disable HW csum
                         * and hope a bad packet is detected
                         */
                        return TXPKT_L4CSUM_DIS;
                }
        } else {
                /*
                 * this doesn't work with extension headers
                 */
                const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

                if (ip6h->nexthdr == IPPROTO_TCP)
                        csum_type = TX_CSUM_TCPIP6;
                else if (ip6h->nexthdr == IPPROTO_UDP)
                        csum_type = TX_CSUM_UDPIP6;
                else
                        goto nocsum;
        }

        if (likely(csum_type >= TX_CSUM_TCPIP))
                return TXPKT_CSUM_TYPE(csum_type) |
                        TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
                        TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
        else {
                int start = skb_transport_offset(skb);

                return TXPKT_CSUM_TYPE(csum_type) |
                        TXPKT_CSUM_START(start) |
                        TXPKT_CSUM_LOC(start + skb->csum_offset);
        }
}
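
/*
 * For instance, a CHECKSUM_PARTIAL TCP/IPv4 packet with a standard 14-byte
 * Ethernet header and a 20-byte IP header resolves to csum_type
 * TX_CSUM_TCPIP with TXPKT_IPHDR_LEN(20) and TXPKT_ETHHDR_LEN(0), letting
 * the hardware locate and fill in the TCP checksum itself.
 */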

/*
 * Stop an Ethernet TX queue and record that state change.
 */
static void txq_stop(struct sge_eth_txq *txq)
{
        netif_tx_stop_queue(txq->txq);
        txq->q.stops++;
}

/*
 * Advance our software state for a TX queue by adding n in use descriptors.
 */
static inline void txq_advance(struct sge_txq *tq, unsigned int n)
{
        tq->in_use += n;
        tq->pidx += n;
        if (tq->pidx >= tq->size)
                tq->pidx -= tq->size;
}

/**
 * t4vf_eth_xmit - add a packet to an Ethernet TX queue
 * @skb: the packet
 * @dev: the egress net device
 *
 * Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u32 wr_mid;
        u64 cntrl, *end;
        int qidx, credits;
        unsigned int flits, ndesc;
        struct adapter *adapter;
        struct sge_eth_txq *txq;
        const struct port_info *pi;
        struct fw_eth_tx_pkt_vm_wr *wr;
        struct cpl_tx_pkt_core *cpl;
        const struct skb_shared_info *ssi;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
        const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
                                        sizeof(wr->ethmacsrc) +
                                        sizeof(wr->ethtype) +
                                        sizeof(wr->vlantci));

        /*
         * The chip minimum packet length is 10 octets but the firmware
         * command that we are using requires that we copy the Ethernet header
         * (including the VLAN tag) into the header so we reject anything
         * smaller than that ...
         */
        if (unlikely(skb->len < fw_hdr_copy_len))
                goto out_free;

        /*
         * Figure out which TX Queue we're going to use.
         */
        pi = netdev_priv(dev);
        adapter = pi->adapter;
        qidx = skb_get_queue_mapping(skb);
        BUG_ON(qidx >= pi->nqsets);
        txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

        /*
         * Take this opportunity to reclaim any TX Descriptors whose DMA
         * transfers have completed.
         */
        reclaim_completed_tx(adapter, &txq->q, true);

        /*
         * Calculate the number of flits and TX Descriptors we're going to
         * need along with how many TX Descriptors will be left over after
         * we inject our Work Request.
         */
        flits = calc_tx_flits(skb);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&txq->q) - ndesc;

        if (unlikely(credits < 0)) {
                /*
                 * Not enough room for this packet's Work Request.  Stop the
                 * TX Queue and return a "busy" condition.  The queue will get
                 * started later on when the firmware informs us that space
                 * has opened up.
                 */
                txq_stop(txq);
                dev_err(adapter->pdev_dev,
                        "%s: TX ring %u full while queue awake!\n",
                        dev->name, qidx);
                return NETDEV_TX_BUSY;
        }

        if (!is_eth_imm(skb) &&
            unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
                /*
                 * We need to map the skb into PCI DMA space (because it can't
                 * be in-lined directly into the Work Request) and the mapping
                 * operation failed.  Record the error and drop the packet.
                 */
                txq->mapping_err++;
                goto out_free;
        }

        wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                /*
                 * After we're done injecting the Work Request for this
                 * packet, we'll be below our "stop threshold" so stop the TX
                 * Queue now and schedule a request for an SGE Egress Queue
                 * Update message.  The queue will get started later on when
                 * the firmware processes this Work Request and sends us an
                 * Egress Queue Status Update message indicating that space
                 * has opened up.
                 */
                txq_stop(txq);
                wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }

        /*
         * Start filling in our Work Request.  Note that we do _not_ handle
         * the WR Header wrapping around the TX Descriptor Ring.  If our
         * maximum header size ever exceeds one TX Descriptor, we'll need to
         * do something else here.
         */
        BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
        wr = (void *)&txq->q.desc[txq->q.pidx];
        wr->equiq_to_len16 = cpu_to_be32(wr_mid);
        wr->r3[0] = cpu_to_be64(0);
        wr->r3[1] = cpu_to_be64(0);
        skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
        end = (u64 *)wr + flits;

        /*
         * If this is a Large Send Offload packet we'll put in an LSO CPL
         * message with an encapsulated TX Packet CPL message.  Otherwise we
         * just use a TX Packet CPL message.
         */
        ssi = skb_shinfo(skb);
        if (ssi->gso_size) {
                struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
                bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
                int l3hdr_len = skb_network_header_len(skb);
                int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

                wr->op_immdlen =
                        cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
                                    FW_WR_IMMDLEN_V(sizeof(*lso) +
                                                    sizeof(*cpl)));
                /*
                 * Fill in the LSO CPL message.
                 */
                lso->lso_ctrl =
                        cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
                                    LSO_FIRST_SLICE |
                                    LSO_LAST_SLICE |
                                    LSO_IPV6(v6) |
                                    LSO_ETHHDR_LEN(eth_xtra_len / 4) |
                                    LSO_IPHDR_LEN(l3hdr_len / 4) |
                                    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
                lso->ipid_ofst = cpu_to_be16(0);
                lso->mss = cpu_to_be16(ssi->gso_size);
                lso->seqno_offset = cpu_to_be32(0);
                if (is_t4(adapter->params.chip))
                        lso->len = cpu_to_be32(skb->len);
                else
                        lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));

                /*
                 * Set up TX Packet CPL pointer, control word and perform
                 * accounting.
                 */
                cpl = (void *)(lso + 1);
                cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
                         TXPKT_IPHDR_LEN(l3hdr_len) |
                         TXPKT_ETHHDR_LEN(eth_xtra_len));
                txq->tso++;
                txq->tx_cso += ssi->gso_segs;
        } else {
                int len;

                len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
                wr->op_immdlen =
                        cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
                                    FW_WR_IMMDLEN_V(len));

                /*
                 * Set up TX Packet CPL pointer, control word and perform
                 * accounting.
                 */
                cpl = (void *)(wr + 1);
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
                        txq->tx_cso++;
                } else
                        cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
        }

        /*
         * If there's a VLAN tag present, add that to the list of things to
         * do in this Work Request.
         */
        if (vlan_tx_tag_present(skb)) {
                txq->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
        }

        /*
         * Fill in the TX Packet CPL message header.
         */
        cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
                                 TXPKT_INTF(pi->port_id) |
                                 TXPKT_PF(0));
        cpl->pack = cpu_to_be16(0);
        cpl->len = cpu_to_be16(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

#ifdef T4_TRACE
        T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
                  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
                  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
#endif

        /*
         * Fill in the body of the TX Packet CPL message with either in-lined
         * data or a Scatter/Gather List.
         */
        if (is_eth_imm(skb)) {
                /*
                 * In-line the packet's data and free the skb since we don't
                 * need it any longer.
                 */
                inline_tx_skb(skb, &txq->q, cpl + 1);
                dev_consume_skb_any(skb);
        } else {
                /*
                 * Write the skb's Scatter/Gather list into the TX Packet CPL
                 * message and retain a pointer to the skb so we can free it
                 * later when its DMA completes.  (We store the skb pointer
                 * in the Software Descriptor corresponding to the last TX
                 * Descriptor used by the Work Request.)
                 *
                 * The retained skb will be freed when the corresponding TX
                 * Descriptors are reclaimed after their DMAs complete.
                 * However, this could take quite a while since, in general,
                 * the hardware is set up to be lazy about sending DMA
                 * completion notifications to us and we mostly perform TX
                 * reclaims in the transmit routine.
                 *
                 * This is good for performance but means that we rely on new
                 * TX packets arriving to run the destructors of completed
                 * packets, which open up space in their sockets' send queues.
                 * Sometimes we do not get such new packets, causing TX to
                 * stall.  A single UDP transmitter is a good example of this
                 * situation.  We have a clean up timer that periodically
                 * reclaims completed packets but it doesn't run often enough
                 * (nor do we want it to) to prevent lengthy stalls.  A
                 * solution to this problem is to run the destructor early,
                 * after the packet is queued but before it's DMAed.  A con is
                 * that we lie to socket memory accounting, but the amount of
                 * extra memory is reasonable (limited by the number of TX
                 * descriptors), the packets do actually get freed quickly by
                 * new packets almost always, and for protocols like TCP that
                 * wait for acks to really free up the data the extra memory
                 * is even less.  On the positive side we run the destructors
                 * on the sending CPU rather than on a potentially different
                 * completing CPU, usually a good thing.
                 *
                 * Run the destructor before telling the DMA engine about the
                 * packet to make sure it doesn't complete and get freed
                 * prematurely.
                 */
                struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
                struct sge_txq *tq = &txq->q;
                int last_desc;

                /*
                 * If the Work Request header was an exact multiple of our TX
                 * Descriptor length, then it's possible that the starting SGL
                 * pointer lines up exactly with the end of our TX Descriptor
                 * ring.  If that's the case, wrap around to the beginning
                 * here ...
                 */
                if (unlikely((void *)sgl == (void *)tq->stat)) {
                        sgl = (void *)tq->desc;
                        end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
                }

                write_sgl(skb, tq, sgl, end, 0, addr);
                skb_orphan(skb);

                last_desc = tq->pidx + ndesc - 1;
                if (last_desc >= tq->size)
                        last_desc -= tq->size;
                tq->sdesc[last_desc].skb = skb;
                tq->sdesc[last_desc].sgl = sgl;
        }

        /*
         * Advance our internal TX Queue state, tell the hardware about
         * the new TX descriptors and return success.
         */
        txq_advance(&txq->q, ndesc);
        dev->trans_start = jiffies;
        ring_tx_db(adapter, &txq->q, ndesc);
        return NETDEV_TX_OK;

out_free:
        /*
         * An error of some sort happened.  Free the TX skb and tell the
         * OS that we've "dealt" with the packet ...
         */
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

/**
 * copy_frags - copy fragments from gather list into skb_shared_info
 * @skb: destination skb
 * @gl: source internal packet gather list
 * @offset: packet start offset in first page
 *
 * Copy an internal packet gather list into a Linux skb_shared_info
 * structure.
 */
static inline void copy_frags(struct sk_buff *skb,
                              const struct pkt_gl *gl,
                              unsigned int offset)
{
        int i;

        /* usually there's just one frag */
        __skb_fill_page_desc(skb, 0, gl->frags[0].page,
                             gl->frags[0].offset + offset,
                             gl->frags[0].size - offset);
        skb_shinfo(skb)->nr_frags = gl->nfrags;
        for (i = 1; i < gl->nfrags; i++)
                __skb_fill_page_desc(skb, i, gl->frags[i].page,
                                     gl->frags[i].offset,
                                     gl->frags[i].size);

        /* get a reference to the last page, we don't own it */
        get_page(gl->frags[gl->nfrags - 1].page);
}

/**
 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 * @gl: the gather list
 * @skb_len: size of sk_buff main body if it carries fragments
 * @pull_len: amount of data to move to the sk_buff's main body
 *
 * Builds an sk_buff from the given packet gather list.  Returns the
 * sk_buff or %NULL if sk_buff allocation failed.
 */
static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
                                         unsigned int skb_len,
                                         unsigned int pull_len)
{
        struct sk_buff *skb;

        /*
         * If the ingress packet is small enough, allocate an skb large enough
         * for all of the data and copy it inline.  Otherwise, allocate an skb
         * with enough room to pull in the header and reference the rest of
         * the data via the skb fragment list.
         *
         * Below we rely on RX_COPY_THRES being less than the smallest Rx
         * buffer size, which is expected since buffers are at least
         * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
         * fragment.
         */
        if (gl->tot_len <= RX_COPY_THRES) {
                /* small packets have only one fragment */
                skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto out;
                __skb_put(skb, gl->tot_len);
                skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
        } else {
                skb = alloc_skb(skb_len, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto out;
                __skb_put(skb, pull_len);
                skb_copy_to_linear_data(skb, gl->va, pull_len);

                copy_frags(skb, gl, pull_len);
                skb->len = gl->tot_len;
                skb->data_len = skb->len - pull_len;
                skb->truesize += skb->data_len;
        }

out:
        return skb;
}

/**
 * t4vf_pktgl_free - free a packet gather list
 * @gl: the gather list
 *
 * Releases the pages of a packet gather list.  We do not own the last
 * page on the list and do not free it.
 */
static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
        int frag;

        frag = gl->nfrags - 1;
        while (frag--)
                put_page(gl->frags[frag].page);
}

/**
 * do_gro - perform Generic Receive Offload ingress packet processing
 * @rxq: ingress RX Ethernet Queue
 * @gl: gather list for ingress packet
 * @pkt: CPL header for last packet fragment
 *
 * Perform Generic Receive Offload (GRO) ingress packet processing.
 * We use the standard Linux GRO interfaces for this.
 */
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                   const struct cpl_rx_pkt *pkt)
{
        struct adapter *adapter = rxq->rspq.adapter;
        struct sge *s = &adapter->sge;
        int ret;
        struct sk_buff *skb;

        skb = napi_get_frags(&rxq->rspq.napi);
        if (unlikely(!skb)) {
                t4vf_pktgl_free(gl);
                rxq->stats.rx_drops++;
                return;
        }

        copy_frags(skb, gl, s->pktshift);
        skb->len = gl->tot_len - s->pktshift;
        skb->data_len = skb->len;
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxq->rspq.idx);

        if (pkt->vlan_ex) {
                __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
                                       be16_to_cpu(pkt->vlan));
                rxq->stats.vlan_ex++;
        }
        ret = napi_gro_frags(&rxq->rspq.napi);

        if (ret == GRO_HELD)
                rxq->stats.lro_pkts++;
        else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
                rxq->stats.lro_merged++;
        rxq->stats.pkts++;
        rxq->stats.rx_cso++;
}

/**
 * t4vf_ethrx_handler - process an ingress ethernet packet
 * @rspq: the response queue that received the packet
 * @rsp: the response queue descriptor holding the RX_PKT message
 * @gl: the gather list of packet fragments
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 */
int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
                       const struct pkt_gl *gl)
{
        struct sk_buff *skb;
        const struct cpl_rx_pkt *pkt = (void *)rsp;
        bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
                       (rspq->netdev->features & NETIF_F_RXCSUM);
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
        struct adapter *adapter = rspq->adapter;
        struct sge *s = &adapter->sge;

        /*
         * If this is a good TCP packet and we have Generic Receive Offload
         * enabled, handle the packet in the GRO path.
         */
        if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
            (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
            !pkt->ip_frag) {
                do_gro(rxq, gl, pkt);
                return 0;
        }

        /*
         * Convert the Packet Gather List into an skb.
         */
        skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
        if (unlikely(!skb)) {
                t4vf_pktgl_free(gl);
                rxq->stats.rx_drops++;
                return 0;
        }
        __skb_pull(skb, s->pktshift);
        skb->protocol = eth_type_trans(skb, rspq->netdev);
        skb_record_rx_queue(skb, rspq->idx);
        rxq->stats.pkts++;

        if (csum_ok && !pkt->err_vec &&
            (be32_to_cpu(pkt->l2info) & (RXF_UDP | RXF_TCP))) {
                if (!pkt->ip_frag)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else {
                        __sum16 c = (__force __sum16)pkt->csum;
                        skb->csum = csum_unfold(c);
                        skb->ip_summed = CHECKSUM_COMPLETE;
1635 }
1636 rxq->stats.rx_cso++;
1637 } else
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001638 skb_checksum_none_assert(skb);
Casey Leedomc6e0d912010-06-25 12:13:28 +00001639
Jiri Pirko87737662011-07-20 04:54:16 +00001640 if (pkt->vlan_ex) {
Casey Leedomc6e0d912010-06-25 12:13:28 +00001641 rxq->stats.vlan_ex++;
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001642 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
Jiri Pirko87737662011-07-20 04:54:16 +00001643 }
1644
1645 netif_receive_skb(skb);
Casey Leedomc6e0d912010-06-25 12:13:28 +00001646
1647 return 0;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001648}
1649
1650/**
1651 * is_new_response - check if a response is newly written
1652 * @rc: the response control descriptor
1653 * @rspq: the response queue
1654 *
1655 * Returns true if a response descriptor contains a yet unprocessed
1656 * response.
1657 */
1658static inline bool is_new_response(const struct rsp_ctrl *rc,
1659 const struct sge_rspq *rspq)
1660{
1661 return RSPD_GEN(rc->type_gen) == rspq->gen;
1662}
1663
1664/**
1665 * restore_rx_bufs - put back a packet's RX buffers
1666 * @gl: the packet gather list
1667 * @fl: the SGE Free List
1668 * @frags: how many fragments in @gl
1669 *
1670 * Called when we find out that the current packet, @gl, can't be
1671 * processed right away for some reason. This is a very rare event and
1672 * there's no effort to make this suspension/resumption process
1673 * particularly efficient.
1674 *
1675 * We implement the suspension by putting all of the RX buffers associated
1676 * with the current packet back on the original Free List. The buffers
1677 * have already been unmapped and are left unmapped; we mark them as
1678 * unmapped in order to prevent further unmapping attempts. (Effectively
1679 * this function undoes the series of @unmap_rx_buf calls which were done
1680 * to create the current packet's gather list.) This leaves us ready to
1681 * restart processing of the packet the next time we start processing the
1682 * RX Queue ...
1683 */
1684static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1685 int frags)
1686{
1687 struct rx_sw_desc *sdesc;
1688
1689 while (frags--) {
1690 if (fl->cidx == 0)
1691 fl->cidx = fl->size - 1;
1692 else
1693 fl->cidx--;
1694 sdesc = &fl->sdesc[fl->cidx];
1695 sdesc->page = gl->frags[frags].page;
1696 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1697 fl->avail++;
1698 }
1699}
1700
1701/**
1702 * rspq_next - advance to the next entry in a response queue
1703 * @rspq: the queue
1704 *
1705 * Updates the state of a response queue to advance it to the next entry.
1706 */
1707static inline void rspq_next(struct sge_rspq *rspq)
1708{
1709 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1710 if (unlikely(++rspq->cidx == rspq->size)) {
1711 rspq->cidx = 0;
1712 rspq->gen ^= 1;
1713 rspq->cur_desc = rspq->desc;
1714 }
1715}
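/*
 * Note on the generation bit: the response ring is written by hardware and
 * never explicitly cleared by software.  Each descriptor carries a
 * generation bit and the queue keeps its expected value in rspq->gen.
 * is_new_response() above only accepts descriptors whose generation matches
 * rspq->gen, and rspq_next() flips rspq->gen whenever the consumer index
 * wraps, so descriptors left over from the previous pass around the ring
 * are never mistaken for new work.
 */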
1716
1717/**
1718 * process_responses - process responses from an SGE response queue
1719 * @rspq: the ingress response queue to process
1720 * @budget: how many responses can be processed in this round
1721 *
1722 * Process responses from a Scatter Gather Engine response queue up to
1723 * the supplied budget. Responses include received packets as well as
1724 * control messages from firmware or hardware.
1725 *
1726 * Additionally choose the interrupt holdoff time for the next interrupt
1727 * on this queue. If the system is under memory shortage use a fairly
1728 * long delay to help recovery.
1729 */
Sachin Kamat8a67d1c2013-09-18 09:00:01 +05301730static int process_responses(struct sge_rspq *rspq, int budget)
Casey Leedomc6e0d912010-06-25 12:13:28 +00001731{
1732 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05301733 struct adapter *adapter = rspq->adapter;
1734 struct sge *s = &adapter->sge;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001735 int budget_left = budget;
1736
1737 while (likely(budget_left)) {
1738 int ret, rsp_type;
1739 const struct rsp_ctrl *rc;
1740
1741 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1742 if (!is_new_response(rc, rspq))
1743 break;
1744
1745 /*
1746 * Figure out what kind of response we've received from the
1747 * SGE.
1748 */
1749 rmb();
1750 rsp_type = RSPD_TYPE(rc->type_gen);
1751 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
Ian Campbella0006a82011-10-19 23:01:47 +00001752 struct page_frag *fp;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001753 struct pkt_gl gl;
1754 const struct rx_sw_desc *sdesc;
1755 u32 bufsz, frag;
1756 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1757
1758 /*
1759 * If we get a "new buffer" message from the SGE we
1760 * need to move on to the next Free List buffer.
1761 */
1762 if (len & RSPD_NEWBUF) {
1763 /*
1764 * We get one "new buffer" message when we
1765 * first start up a queue so we need to ignore
1766 * it when our offset into the buffer is 0.
1767 */
1768 if (likely(rspq->offset > 0)) {
1769 free_rx_bufs(rspq->adapter, &rxq->fl,
1770 1);
1771 rspq->offset = 0;
1772 }
1773 len = RSPD_LEN(len);
1774 }
Casey Leedomb94e72e2010-11-11 09:06:49 +00001775 gl.tot_len = len;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001776
1777 /*
1778 * Gather packet fragments.
1779 */
1780 for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1781 BUG_ON(frag >= MAX_SKB_FRAGS);
1782 BUG_ON(rxq->fl.avail == 0);
1783 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05301784 bufsz = get_buf_size(adapter, sdesc);
Casey Leedomc6e0d912010-06-25 12:13:28 +00001785 fp->page = sdesc->page;
Ian Campbella0006a82011-10-19 23:01:47 +00001786 fp->offset = rspq->offset;
1787 fp->size = min(bufsz, len);
1788 len -= fp->size;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001789 if (!len)
1790 break;
1791 unmap_rx_buf(rspq->adapter, &rxq->fl);
1792 }
1793 gl.nfrags = frag+1;
1794
1795 /*
1796 * Last buffer remains mapped so explicitly make it
1797 * coherent for CPU access and start preloading first
1798 * cache line ...
1799 */
1800 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1801 get_buf_addr(sdesc),
Ian Campbella0006a82011-10-19 23:01:47 +00001802 fp->size, DMA_FROM_DEVICE);
Casey Leedomc6e0d912010-06-25 12:13:28 +00001803 gl.va = (page_address(gl.frags[0].page) +
Ian Campbella0006a82011-10-19 23:01:47 +00001804 gl.frags[0].offset);
Casey Leedomc6e0d912010-06-25 12:13:28 +00001805 prefetch(gl.va);
1806
1807 /*
1808 * Hand the new ingress packet to the handler for
1809 * this Response Queue.
1810 */
1811 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1812 if (likely(ret == 0))
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05301813 rspq->offset += ALIGN(fp->size, s->fl_align);
Casey Leedomc6e0d912010-06-25 12:13:28 +00001814 else
1815 restore_rx_bufs(&gl, &rxq->fl, frag);
1816 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1817 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1818 } else {
1819 WARN_ON(rsp_type > RSP_TYPE_CPL);
1820 ret = 0;
1821 }
1822
1823 if (unlikely(ret)) {
1824 /*
1825 * Couldn't process descriptor, back off for recovery.
1826 * We use the SGE's last timer which has the longest
1827 * interrupt coalescing value ...
1828 */
1829 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1830 rspq->next_intr_params =
1831 QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
1832 break;
1833 }
1834
1835 rspq_next(rspq);
1836 budget_left--;
1837 }
1838
1839 /*
1840 * If this is a Response Queue with an associated Free List and
1841 * at least two Egress Queue units available in the Free List
1842 * for new buffer pointers, refill the Free List.
1843 */
1844 if (rspq->offset >= 0 &&
1845 rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1846 __refill_fl(rspq->adapter, &rxq->fl);
1847 return budget - budget_left;
1848}
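/*
 * A note on rspq->offset: the Free Lists are set up in Packed Buffer Mode
 * (FW_IQ_CMD_FL0PACKEN_F in t4vf_sge_alloc_rxq() below), so several small
 * ingress packets may share a single Free List buffer.  After each
 * successfully handled packet the code above advances rspq->offset by the
 * fragment size rounded up to the ingress alignment (s->fl_align), and the
 * buffer currently being packed is only retired when the SGE announces a
 * "new buffer" via the RSPD_NEWBUF flag.
 */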
1849
1850/**
1851 * napi_rx_handler - the NAPI handler for RX processing
1852 * @napi: the napi instance
1853 * @budget: how many packets we can process in this round
1854 *
1855 * Handler for new data events when using NAPI. This does not need any
1856 * locking or protection from interrupts as data interrupts are off at
1857 * this point and other adapter interrupts do not interfere (the latter
1858 * is not a concern at all with MSI-X as non-data interrupts then have
1859 * a separate handler).
1860 */
1861static int napi_rx_handler(struct napi_struct *napi, int budget)
1862{
1863 unsigned int intr_params;
1864 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1865 int work_done = process_responses(rspq, budget);
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05301866 u32 val;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001867
1868 if (likely(work_done < budget)) {
1869 napi_complete(napi);
1870 intr_params = rspq->next_intr_params;
1871 rspq->next_intr_params = rspq->intr_params;
1872 } else
1873 intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
1874
Casey Leedom68dc9d32010-07-08 10:05:48 -07001875 if (unlikely(work_done == 0))
1876 rspq->unhandled_irqs++;
1877
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05301878 val = CIDXINC(work_done) | SEINTARM(intr_params);
1879 if (is_t4(rspq->adapter->params.chip)) {
1880 t4_write_reg(rspq->adapter,
1881 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1882 val | INGRESSQID((u32)rspq->cntxt_id));
1883 } else {
1884 writel(val | INGRESSQID(rspq->bar2_qid),
1885 rspq->bar2_addr + SGE_UDB_GTS);
1886 wmb();
1887 }
Casey Leedomc6e0d912010-06-25 12:13:28 +00001888 return work_done;
1889}
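/*
 * The doorbell write at the end of napi_rx_handler() (and of process_intrq()
 * below) tells the SGE how many response queue entries we have consumed
 * (CIDXINC) and which interrupt holdoff parameters to arm next (SEINTARM).
 * On T4 the Virtual Function reaches the GTS register through its normal
 * register map (T4VF_SGE_BASE_ADDR + SGE_VF_GTS); on later chips the write
 * goes to the queue's own BAR2 doorbell area obtained via bar2_address()
 * below, followed by a write memory barrier.
 */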
1890
1891/*
1892 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1893 * (i.e., response queue serviced by NAPI polling).
1894 */
1895irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1896{
1897 struct sge_rspq *rspq = cookie;
1898
1899 napi_schedule(&rspq->napi);
1900 return IRQ_HANDLED;
1901}
1902
1903/*
1904 * Process the indirect interrupt entries in the interrupt queue and kick off
1905 * NAPI for each queue that has generated an entry.
1906 */
1907static unsigned int process_intrq(struct adapter *adapter)
1908{
1909 struct sge *s = &adapter->sge;
1910 struct sge_rspq *intrq = &s->intrq;
1911 unsigned int work_done;
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05301912 u32 val;
Casey Leedomc6e0d912010-06-25 12:13:28 +00001913
1914 spin_lock(&adapter->sge.intrq_lock);
1915 for (work_done = 0; ; work_done++) {
1916 const struct rsp_ctrl *rc;
1917 unsigned int qid, iq_idx;
1918 struct sge_rspq *rspq;
1919
1920 /*
1921 * Grab the next response from the interrupt queue and bail
1922 * out if it's not a new response.
1923 */
1924 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1925 if (!is_new_response(rc, intrq))
1926 break;
1927
1928 /*
1929 * If the response isn't a forwarded interrupt message, issue an
1930 * error and go on to the next response message. This should
1931 * never happen ...
1932 */
1933 rmb();
1934 if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
1935 dev_err(adapter->pdev_dev,
1936 "Unexpected INTRQ response type %d\n",
1937 RSPD_TYPE(rc->type_gen));
1938 continue;
1939 }
1940
1941 /*
1942 * Extract the Queue ID from the interrupt message and perform
1943 * sanity checking to make sure it really refers to one of our
1944 * Ingress Queues which is active and matches the queue's ID.
1945 * None of these error conditions should ever happen so we may
1946 * want to make them fatal and/or conditionalize them under
1947 * DEBUG.
1948 */
1949 qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
1950 iq_idx = IQ_IDX(s, qid);
1951 if (unlikely(iq_idx >= MAX_INGQ)) {
1952 dev_err(adapter->pdev_dev,
1953 "Ingress QID %d out of range\n", qid);
1954 continue;
1955 }
1956 rspq = s->ingr_map[iq_idx];
1957 if (unlikely(rspq == NULL)) {
1958 dev_err(adapter->pdev_dev,
1959 "Ingress QID %d RSPQ=NULL\n", qid);
1960 continue;
1961 }
1962 if (unlikely(rspq->abs_id != qid)) {
1963 dev_err(adapter->pdev_dev,
1964 "Ingress QID %d refers to RSPQ %d\n",
1965 qid, rspq->abs_id);
1966 continue;
1967 }
1968
1969 /*
1970 * Schedule NAPI processing on the indicated Response Queue
1971 * and move on to the next entry in the Forwarded Interrupt
1972 * Queue.
1973 */
1974 napi_schedule(&rspq->napi);
1975 rspq_next(intrq);
1976 }
1977
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05301978 val = CIDXINC(work_done) | SEINTARM(intrq->intr_params);
1979 if (is_t4(adapter->params.chip))
1980 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1981 val | INGRESSQID(intrq->cntxt_id));
1982 else {
1983 writel(val | INGRESSQID(intrq->bar2_qid),
1984 intrq->bar2_addr + SGE_UDB_GTS);
1985 wmb();
1986 }
Casey Leedomc6e0d912010-06-25 12:13:28 +00001987
1988 spin_unlock(&adapter->sge.intrq_lock);
1989
1990 return work_done;
1991}
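/*
 * In MSI mode the adapter has a single interrupt vector, so the individual
 * Response Queues are configured (in t4vf_sge_alloc_rxq() below) to post
 * "forwarded interrupt" entries into this one Interrupt Queue instead of
 * raising interrupts themselves.  process_intrq() above walks those entries
 * and schedules NAPI on each referenced Response Queue.
 */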
1992
1993/*
1994 * The MSI interrupt handler handles data events from SGE response queues as
1995 * well as error and other async events as they all use the same MSI vector.
1996 */
Sachin Kamat8a67d1c2013-09-18 09:00:01 +05301997static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
Casey Leedomc6e0d912010-06-25 12:13:28 +00001998{
1999 struct adapter *adapter = cookie;
2000
2001 process_intrq(adapter);
2002 return IRQ_HANDLED;
2003}
2004
2005/**
2006 * t4vf_intr_handler - select the top-level interrupt handler
2007 * @adapter: the adapter
2008 *
2009 * Selects the top-level interrupt handler based on the type of interrupts
2010 * (MSI-X or MSI).
2011 */
2012irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2013{
2014 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2015 if (adapter->flags & USING_MSIX)
2016 return t4vf_sge_intr_msix;
2017 else
2018 return t4vf_intr_msi;
2019}
2020
2021/**
2022 * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
2023 * @data: the adapter
2024 *
2025 * Runs periodically from a timer to perform maintenance of SGE RX queues.
2026 *
2027 * a) Replenishes RX queues that have run out due to memory shortage.
2028 * Normally new RX buffers are added when existing ones are consumed but
2029 * when out of memory a queue can become empty. We schedule NAPI to do
2030 * the actual refill.
2031 */
2032static void sge_rx_timer_cb(unsigned long data)
2033{
2034 struct adapter *adapter = (struct adapter *)data;
2035 struct sge *s = &adapter->sge;
2036 unsigned int i;
2037
2038 /*
2039 * Scan the "Starving Free Lists" flag array looking for any Free
2040 * Lists in need of more free buffers. If we find one and it's not
2041 * being actively polled, then bump its "starving" counter and attempt
2042 * to refill it. If we're successful in adding enough buffers to push
2043 * the Free List over the starving threshold, then we can clear its
2044 * "starving" status.
2045 */
2046 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2047 unsigned long m;
2048
2049 for (m = s->starving_fl[i]; m; m &= m - 1) {
2050 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2051 struct sge_fl *fl = s->egr_map[id];
2052
2053 clear_bit(id, s->starving_fl);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01002054 smp_mb__after_atomic();
Casey Leedomc6e0d912010-06-25 12:13:28 +00002055
2056 /*
2057 * Since we are accessing fl without a lock there's a
2058 * small probability of a false positive where we
2059 * schedule napi but the FL is no longer starving.
2060 * No biggie.
2061 */
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302062 if (fl_starving(adapter, fl)) {
Casey Leedomc6e0d912010-06-25 12:13:28 +00002063 struct sge_eth_rxq *rxq;
2064
2065 rxq = container_of(fl, struct sge_eth_rxq, fl);
2066 if (napi_reschedule(&rxq->rspq.napi))
2067 fl->starving++;
2068 else
2069 set_bit(id, s->starving_fl);
2070 }
2071 }
2072 }
2073
2074 /*
2075 * Reschedule the next scan for starving Free Lists ...
2076 */
2077 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2078}
2079
2080/**
2081 * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
2082 * @data: the adapter
2083 *
2084 * Runs periodically from a timer to perform maintenance of SGE TX queues.
2085 *
2086 * b) Reclaims completed Tx packets for the Ethernet queues. Normally
2087 * packets are cleaned up by new Tx packets, this timer cleans up packets
2088 * when no new packets are being submitted. This is essential for pktgen,
2089 * at least.
2090 */
2091static void sge_tx_timer_cb(unsigned long data)
2092{
2093 struct adapter *adapter = (struct adapter *)data;
2094 struct sge *s = &adapter->sge;
2095 unsigned int i, budget;
2096
2097 budget = MAX_TIMER_TX_RECLAIM;
2098 i = s->ethtxq_rover;
2099 do {
2100 struct sge_eth_txq *txq = &s->ethtxq[i];
2101
2102 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2103 int avail = reclaimable(&txq->q);
2104
2105 if (avail > budget)
2106 avail = budget;
2107
2108 free_tx_desc(adapter, &txq->q, avail, true);
2109 txq->q.in_use -= avail;
2110 __netif_tx_unlock(txq->txq);
2111
2112 budget -= avail;
2113 if (!budget)
2114 break;
2115 }
2116
2117 i++;
2118 if (i >= s->ethqsets)
2119 i = 0;
2120 } while (i != s->ethtxq_rover);
2121 s->ethtxq_rover = i;
2122
2123 /*
2124 * If we found too many reclaimable packets schedule a timer in the
2125 * near future to continue where we left off. Otherwise the next timer
2126 * will be at its normal interval.
2127 */
2128 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2129}
2130
2131/**
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05302132 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2133 * @adapter: the adapter
2134 * @qid: the SGE Queue ID
2135 * @qtype: the SGE Queue Type (Egress or Ingress)
2136 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2137 *
2138 * Returns the BAR2 address for the SGE Queue Registers associated with
2139 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2140 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2141 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2142 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2143 */
2144static void __iomem *bar2_address(struct adapter *adapter,
2145 unsigned int qid,
2146 enum t4_bar2_qtype qtype,
2147 unsigned int *pbar2_qid)
2148{
2149 u64 bar2_qoffset;
2150 int ret;
2151
2152 ret = t4_bar2_sge_qregs(adapter, qid, qtype,
2153 &bar2_qoffset, pbar2_qid);
2154 if (ret)
2155 return NULL;
2156
2157 return adapter->bar2 + bar2_qoffset;
2158}
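/*
 * A minimal usage sketch (the real calls are in the queue allocation
 * routines below): once a queue has been created in firmware, its doorbell
 * address and BAR2 Queue ID are cached in the queue structure, e.g.
 *
 *	rspq->bar2_addr = bar2_address(adapter, rspq->cntxt_id,
 *				       T4_BAR2_QTYPE_INGRESS,
 *				       &rspq->bar2_qid);
 *
 * so that subsequent doorbell updates can use bar2_addr/bar2_qid directly
 * instead of recomputing the mapping on the fast path.
 */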
2159
2160/**
Casey Leedomc6e0d912010-06-25 12:13:28 +00002161 * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2162 * @adapter: the adapter
2163 * @rspq: pointer to the new rxq's Response Queue to be filled in
2164 * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2165 * @dev: the network device associated with the new rspq
2166 * @intr_dest: MSI-X vector index (overridden in MSI mode)
2167 * @fl: pointer to the new rxq's Free List to be filled in
2168 * @hnd: the interrupt handler to invoke for the rspq
2169 */
2170int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2171 bool iqasynch, struct net_device *dev,
2172 int intr_dest,
2173 struct sge_fl *fl, rspq_handler_t hnd)
2174{
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302175 struct sge *s = &adapter->sge;
Casey Leedomc6e0d912010-06-25 12:13:28 +00002176 struct port_info *pi = netdev_priv(dev);
2177 struct fw_iq_cmd cmd, rpl;
2178 int ret, iqandst, flsz = 0;
2179
2180 /*
2181 * If we're using MSI interrupts and we're not initializing the
2182 * Forwarded Interrupt Queue itself, then set up this queue for
2183 * indirect interrupts to the Forwarded Interrupt Queue. Obviously
2184 * the Forwarded Interrupt Queue must be set up before any other
2185 * ingress queue ...
2186 */
2187 if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
2188 iqandst = SGE_INTRDST_IQ;
2189 intr_dest = adapter->sge.intrq.abs_id;
2190 } else
2191 iqandst = SGE_INTRDST_PCI;
2192
2193 /*
2194 * Allocate the hardware ring for the Response Queue. The size needs
2195 * to be a multiple of 16 which includes the mandatory status entry
2196 * (regardless of whether the Status Page capabilities are enabled or
2197 * not).
2198 */
2199 rspq->size = roundup(rspq->size, 16);
2200 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2201 0, &rspq->phys_addr, NULL, 0);
2202 if (!rspq->desc)
2203 return -ENOMEM;
2204
2205 /*
2206 * Fill in the Ingress Queue Command. Note: Ideally this code would
2207 * be in t4vf_hw.c but there are so many parameters and dependencies
2208 * on our Linux SGE state that we would end up having to pass tons of
2209 * parameters. We'll have to think about how this might be migrated
2210 * into OS-independent common code ...
2211 */
2212 memset(&cmd, 0, sizeof(cmd));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05302213 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2214 FW_CMD_REQUEST_F |
2215 FW_CMD_WRITE_F |
2216 FW_CMD_EXEC_F);
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302217 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2218 FW_IQ_CMD_IQSTART_F |
Casey Leedomc6e0d912010-06-25 12:13:28 +00002219 FW_LEN16(cmd));
2220 cmd.type_to_iqandstindex =
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302221 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2222 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2223 FW_IQ_CMD_VIID_V(pi->viid) |
2224 FW_IQ_CMD_IQANDST_V(iqandst) |
2225 FW_IQ_CMD_IQANUS_V(1) |
2226 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2227 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002228 cmd.iqdroprss_to_iqesize =
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302229 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2230 FW_IQ_CMD_IQGTSMODE_F |
2231 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2232 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002233 cmd.iqsize = cpu_to_be16(rspq->size);
2234 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2235
2236 if (fl) {
2237 /*
2238 * Allocate the ring for the hardware free list (with space
2239 * for its status page) along with the associated software
2240 * descriptor ring. The free list size needs to be a multiple
2241 * of the Egress Queue Unit.
2242 */
2243 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2244 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2245 sizeof(__be64), sizeof(struct rx_sw_desc),
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302246 &fl->addr, &fl->sdesc, s->stat_len);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002247 if (!fl->desc) {
2248 ret = -ENOMEM;
2249 goto err;
2250 }
2251
2252 /*
2253 * Calculate the size of the hardware free list ring plus
Casey Leedomcaedda32010-11-11 09:30:40 +00002254 * Status Page (which the SGE will place after the end of the
Casey Leedomc6e0d912010-06-25 12:13:28 +00002255 * free list ring) in Egress Queue Units.
2256 */
2257 flsz = (fl->size / FL_PER_EQ_UNIT +
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302258 s->stat_len / EQ_UNIT);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002259
2260 /*
2261 * Fill in all the relevant firmware Ingress Queue Command
2262 * fields for the free list.
2263 */
2264 cmd.iqns_to_fl0congen =
2265 cpu_to_be32(
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302266 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2267 FW_IQ_CMD_FL0PACKEN_F |
2268 FW_IQ_CMD_FL0PADEN_F);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002269 cmd.fl0dcaen_to_fl0cidxfthresh =
2270 cpu_to_be16(
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302271 FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
2272 FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002273 cmd.fl0size = cpu_to_be16(flsz);
2274 cmd.fl0addr = cpu_to_be64(fl->addr);
2275 }
2276
2277 /*
2278 * Issue the firmware Ingress Queue Command and extract the results if
2279 * it completes successfully.
2280 */
2281 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2282 if (ret)
2283 goto err;
2284
2285 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2286 rspq->cur_desc = rspq->desc;
2287 rspq->cidx = 0;
2288 rspq->gen = 1;
2289 rspq->next_intr_params = rspq->intr_params;
2290 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05302291 rspq->bar2_addr = bar2_address(adapter,
2292 rspq->cntxt_id,
2293 T4_BAR2_QTYPE_INGRESS,
2294 &rspq->bar2_qid);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002295 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2296 rspq->size--; /* subtract status entry */
2297 rspq->adapter = adapter;
2298 rspq->netdev = dev;
2299 rspq->handler = hnd;
2300
2301 /* set offset to -1 to distinguish ingress queues without FL */
2302 rspq->offset = fl ? 0 : -1;
2303
2304 if (fl) {
2305 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2306 fl->avail = 0;
2307 fl->pend_cred = 0;
2308 fl->pidx = 0;
2309 fl->cidx = 0;
2310 fl->alloc_failed = 0;
2311 fl->large_alloc_failed = 0;
2312 fl->starving = 0;
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05302313
2314 /* Note, we must initialize the BAR2 Free List User Doorbell
2315 * information before refilling the Free List!
2316 */
2317 fl->bar2_addr = bar2_address(adapter,
2318 fl->cntxt_id,
2319 T4_BAR2_QTYPE_EGRESS,
2320 &fl->bar2_qid);
2321
Casey Leedomc6e0d912010-06-25 12:13:28 +00002322 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2323 }
2324
2325 return 0;
2326
2327err:
2328 /*
2329 * An error occurred. Clean up our partial allocation state and
2330 * return the error.
2331 */
2332 if (rspq->desc) {
2333 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2334 rspq->desc, rspq->phys_addr);
2335 rspq->desc = NULL;
2336 }
2337 if (fl && fl->desc) {
2338 kfree(fl->sdesc);
2339 fl->sdesc = NULL;
2340 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2341 fl->desc, fl->addr);
2342 fl->desc = NULL;
2343 }
2344 return ret;
2345}
2346
2347/**
2348 * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2349 * @adapter: the adapter
2350 * @txq: pointer to the new txq to be filled in
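 * @dev: the network device associated with the new txq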
2351 * @devq: the network TX queue associated with the new txq
2352 * @iqid: the relative ingress queue ID to which events relating to
2353 * the new txq should be directed
2354 */
2355int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2356 struct net_device *dev, struct netdev_queue *devq,
2357 unsigned int iqid)
2358{
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302359 struct sge *s = &adapter->sge;
Casey Leedomc6e0d912010-06-25 12:13:28 +00002360 int ret, nentries;
2361 struct fw_eq_eth_cmd cmd, rpl;
2362 struct port_info *pi = netdev_priv(dev);
2363
2364 /*
Casey Leedomcaedda32010-11-11 09:30:40 +00002365 * Calculate the size of the hardware TX Queue (including the Status
2366 * Page on the end of the TX Queue) in units of TX Descriptors.
Casey Leedomc6e0d912010-06-25 12:13:28 +00002367 */
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302368 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002369
2370 /*
2371 * Allocate the hardware ring for the TX ring (with space for its
2372 * status page) along with the associated software descriptor ring.
2373 */
2374 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2375 sizeof(struct tx_desc),
2376 sizeof(struct tx_sw_desc),
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302377 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002378 if (!txq->q.desc)
2379 return -ENOMEM;
2380
2381 /*
2382 * Fill in the Egress Queue Command. Note: As with the direct use of
2383 * the firmware Ingress Queue Command above in our RXQ allocation
2384 * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
2385 * have to see if there's some reasonable way to parameterize it
2386 * into the common code ...
2387 */
2388 memset(&cmd, 0, sizeof(cmd));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05302389 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2390 FW_CMD_REQUEST_F |
2391 FW_CMD_WRITE_F |
2392 FW_CMD_EXEC_F);
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302393 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2394 FW_EQ_ETH_CMD_EQSTART_F |
Casey Leedomc6e0d912010-06-25 12:13:28 +00002395 FW_LEN16(cmd));
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302396 cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2397 FW_EQ_ETH_CMD_VIID_V(pi->viid));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002398 cmd.fetchszm_to_iqid =
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302399 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2400 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2401 FW_EQ_ETH_CMD_IQID_V(iqid));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002402 cmd.dcaen_to_eqsize =
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302403 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
2404 FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
2405 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2406 SGE_CIDXFLUSHTHRESH_32) |
2407 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002408 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2409
2410 /*
2411 * Issue the firmware Egress Queue Command and extract the results if
2412 * it completes successfully.
2413 */
2414 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2415 if (ret) {
2416 /*
2417 * The firmware Egress Queue Command failed for some reason.
2418 * Free up our partial allocation state and return the error.
2419 */
2420 kfree(txq->q.sdesc);
2421 txq->q.sdesc = NULL;
2422 dma_free_coherent(adapter->pdev_dev,
2423 nentries * sizeof(struct tx_desc),
2424 txq->q.desc, txq->q.phys_addr);
2425 txq->q.desc = NULL;
2426 return ret;
2427 }
2428
2429 txq->q.in_use = 0;
2430 txq->q.cidx = 0;
2431 txq->q.pidx = 0;
2432 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302433 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
Hariprasad Shenaidf64e4d2014-12-03 19:32:53 +05302434 txq->q.bar2_addr = bar2_address(adapter,
2435 txq->q.cntxt_id,
2436 T4_BAR2_QTYPE_EGRESS,
2437 &txq->q.bar2_qid);
Casey Leedomc6e0d912010-06-25 12:13:28 +00002438 txq->q.abs_id =
Hariprasad Shenai6e4b51a2014-11-21 12:52:03 +05302439 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
Casey Leedomc6e0d912010-06-25 12:13:28 +00002440 txq->txq = devq;
2441 txq->tso = 0;
2442 txq->tx_cso = 0;
2443 txq->vlan_ins = 0;
2444 txq->q.stops = 0;
2445 txq->q.restarts = 0;
2446 txq->mapping_err = 0;
2447 return 0;
2448}
2449
2450/*
2451 * Free the DMA map resources associated with a TX queue.
2452 */
2453static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2454{
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302455 struct sge *s = &adapter->sge;
2456
Casey Leedomc6e0d912010-06-25 12:13:28 +00002457 dma_free_coherent(adapter->pdev_dev,
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302458 tq->size * sizeof(*tq->desc) + s->stat_len,
Casey Leedomc6e0d912010-06-25 12:13:28 +00002459 tq->desc, tq->phys_addr);
2460 tq->cntxt_id = 0;
2461 tq->sdesc = NULL;
2462 tq->desc = NULL;
2463}
2464
2465/*
2466 * Free the resources associated with a response queue (possibly including a
2467 * free list).
2468 */
2469static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2470 struct sge_fl *fl)
2471{
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302472 struct sge *s = &adapter->sge;
Casey Leedomc6e0d912010-06-25 12:13:28 +00002473 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2474
2475 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2476 rspq->cntxt_id, flid, 0xffff);
2477 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2478 rspq->desc, rspq->phys_addr);
2479 netif_napi_del(&rspq->napi);
2480 rspq->netdev = NULL;
2481 rspq->cntxt_id = 0;
2482 rspq->abs_id = 0;
2483 rspq->desc = NULL;
2484
2485 if (fl) {
2486 free_rx_bufs(adapter, fl, fl->avail);
2487 dma_free_coherent(adapter->pdev_dev,
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302488 fl->size * sizeof(*fl->desc) + s->stat_len,
Casey Leedomc6e0d912010-06-25 12:13:28 +00002489 fl->desc, fl->addr);
2490 kfree(fl->sdesc);
2491 fl->sdesc = NULL;
2492 fl->cntxt_id = 0;
2493 fl->desc = NULL;
2494 }
2495}
2496
2497/**
2498 * t4vf_free_sge_resources - free SGE resources
2499 * @adapter: the adapter
2500 *
2501 * Frees resources used by the SGE queue sets.
2502 */
2503void t4vf_free_sge_resources(struct adapter *adapter)
2504{
2505 struct sge *s = &adapter->sge;
2506 struct sge_eth_rxq *rxq = s->ethrxq;
2507 struct sge_eth_txq *txq = s->ethtxq;
2508 struct sge_rspq *evtq = &s->fw_evtq;
2509 struct sge_rspq *intrq = &s->intrq;
2510 int qs;
2511
Casey Leedomb97d13a2010-07-15 22:47:06 -07002512 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
Casey Leedomc6e0d912010-06-25 12:13:28 +00002513 if (rxq->rspq.desc)
2514 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2515 if (txq->q.desc) {
2516 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2517 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2518 kfree(txq->q.sdesc);
2519 free_txq(adapter, &txq->q);
2520 }
2521 }
2522 if (evtq->desc)
2523 free_rspq_fl(adapter, evtq, NULL);
2524 if (intrq->desc)
2525 free_rspq_fl(adapter, intrq, NULL);
2526}
2527
2528/**
2529 * t4vf_sge_start - enable SGE operation
2530 * @adapter: the adapter
2531 *
2532 * Start tasklets and timers associated with the DMA engine.
2533 */
2534void t4vf_sge_start(struct adapter *adapter)
2535{
2536 adapter->sge.ethtxq_rover = 0;
2537 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2538 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2539}
2540
2541/**
2542 * t4vf_sge_stop - disable SGE operation
2543 * @adapter: the adapter
2544 *
2545 * Stop tasklets and timers associated with the DMA engine. Note that
2546 * this is effective only if measures have been taken to disable any HW
2547 * events that may restart them.
2548 */
2549void t4vf_sge_stop(struct adapter *adapter)
2550{
2551 struct sge *s = &adapter->sge;
2552
2553 if (s->rx_timer.function)
2554 del_timer_sync(&s->rx_timer);
2555 if (s->tx_timer.function)
2556 del_timer_sync(&s->tx_timer);
2557}
2558
2559/**
2560 * t4vf_sge_init - initialize SGE
2561 * @adapter: the adapter
2562 *
2563 * Performs SGE initialization needed every time after a chip reset.
2564 * We do not initialize any of the queue sets here; instead, the driver
2565 * top-level must request those individually. We also do not enable DMA
2566 * here, that should be done after the queues have been set up.
2567 */
2568int t4vf_sge_init(struct adapter *adapter)
2569{
2570 struct sge_params *sge_params = &adapter->params.sge;
2571 u32 fl0 = sge_params->sge_fl_buffer_size[0];
2572 u32 fl1 = sge_params->sge_fl_buffer_size[1];
2573 struct sge *s = &adapter->sge;
Hariprasad Shenaice8f4072014-11-07 17:06:30 +05302574 unsigned int ingpadboundary, ingpackboundary;
Casey Leedomc6e0d912010-06-25 12:13:28 +00002575
2576 /*
2577 * Start by vetting the basic SGE parameters which have been set up by
2578 * the Physical Function Driver. Ideally we should be able to deal
2579 * with _any_ configuration. Practice is different ...
2580 */
2581 if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
2582 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2583 fl0, fl1);
2584 return -EINVAL;
2585 }
Vipul Pandya52367a72012-09-26 02:39:38 +00002586 if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
Casey Leedomc6e0d912010-06-25 12:13:28 +00002587 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2588 return -EINVAL;
2589 }
2590
2591 /*
2592 * Now translate the adapter parameters into our internal forms.
2593 */
2594 if (fl1)
Hariprasad Shenai65f6ecc2014-11-07 17:06:29 +05302595 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2596 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
2597 ? 128 : 64);
2598 s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
Hariprasad Shenaice8f4072014-11-07 17:06:30 +05302599
2600 /* T4 uses a single control field to specify both the PCIe Padding and
2601 * Packing Boundary. T5 introduced the ability to specify these
2602 * separately. The actual Ingress Packet Data alignment boundary
2603 * within Packed Buffer Mode is the maximum of these two
2604 * specifications. (Note that it makes no real practical sense to
2605 * have the Pading Boudary be larger than the Packing Boundary but you
2606 * could set the chip up that way and, in fact, legacy T4 code would
2607 * end doing this because it would initialize the Padding Boundary and
2608 * leave the Packing Boundary initialized to 0 (16 bytes).)
2609 */
2610 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
2611 X_INGPADBOUNDARY_SHIFT);
2612 if (is_t4(adapter->params.chip)) {
2613 s->fl_align = ingpadboundary;
2614 } else {
2615 /* T5 has a different interpretation of one of the PCIe Packing
2616 * Boundary values.
2617 */
2618 ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
2619 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2620 ingpackboundary = 16;
2621 else
2622 ingpackboundary = 1 << (ingpackboundary +
2623 INGPACKBOUNDARY_SHIFT_X);
2624
2625 s->fl_align = max(ingpadboundary, ingpackboundary);
2626 }
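	/* A hypothetical worked example of the above: if the PF left the
	 * Padding Boundary at 64 bytes and, on a T5, programmed a 16-byte
	 * Packing Boundary, fl_align ends up as max(64, 16) = 64, and
	 * rspq->offset in process_responses() then advances through a packed
	 * Free List buffer in 64-byte steps.
	 */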
Casey Leedomc6e0d912010-06-25 12:13:28 +00002627
Hariprasad Shenai50d21a62014-11-07 17:06:31 +05302628 /* A FL with <= fl_starve_thres buffers is starving and a periodic
2629 * timer will attempt to refill it. This needs to be larger than the
2630 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2631 * stuck waiting for new packets while the SGE is waiting for us to
2632 * give it more Free List entries. (Note that the SGE's Egress
2633 * Congestion Threshold is in units of 2 Free List pointers.)
2634 */
2635 s->fl_starve_thres
2636 = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
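	/* For example (hypothetical numbers): an Egress Congestion Threshold
	 * of 64, which the SGE expresses in units of 2 Free List pointers,
	 * yields fl_starve_thres = 64 * 2 + 1 = 129, so a Free List is
	 * considered starving once it is down to 129 or fewer buffers and the
	 * periodic RX timer will then try to replenish it.
	 */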
Casey Leedomc6e0d912010-06-25 12:13:28 +00002637
2638 /*
2639 * Set up tasklet timers.
2640 */
2641 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
2642 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
2643
2644 /*
2645 * Initialize Forwarded Interrupt Queue lock.
2646 */
2647 spin_lock_init(&s->intrq_lock);
2648
2649 return 0;
2650}