/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H

#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>

#include "vmxnet3_defs.h"

#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING   "1.4.16.0-k"

/* Each byte of this 32-bit integer encodes a version number in
 * VMXNET3_DRIVER_VERSION_STRING.
 */
#define VMXNET3_DRIVER_VERSION_NUM      0x01041000
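/* e.g. 0x01041000, read byte-wise from the most significant byte down,
 * encodes 1.4.16.0, the numeric part of the version string above.
 */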

#if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
        #define VMXNET3_RSS
#endif

#define VMXNET3_REV_3   2       /* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2   1       /* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1   0       /* Vmxnet3 Rev. 1 */

/*
 * Capabilities
 */

enum {
        VMNET_CAP_SG            = 0x0001, /* Can do scatter-gather transmits. */
        VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
                                           * IPv4 */
        VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
        VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
        VMNET_CAP_TOE           = 0x0010, /* Supports TCP/IP offload. */
        VMNET_CAP_TSO           = 0x0020, /* Supports TCP Segmentation
                                           * offload */
        VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
        VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
        VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
        VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
        VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
        VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
        VMNET_CAP_ENABLE_INT_INLINE  = 0x1000, /* Enable Interrupt Inline */
        VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */
        VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
                                           * for a pkt */
        VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
        VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
        VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW */
        VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can
                                            * span multiple pages */
        VMNET_CAP_IP6_CSUM      = 0x80000, /* Can do IPv6 csum offload. */
        VMNET_CAP_TSO6          = 0x100000, /* TSO seg. offload for IPv6 pkts. */
        VMNET_CAP_TSO256k       = 0x200000, /* Can do TSO seg offload for
                                             * pkts up to 256kB. */
        VMNET_CAP_UPT           = 0x400000  /* Support UPT */
};

/*
 * Maximum devices supported.
 */
#define MAX_ETHERNET_CARDS              10
#define MAX_PCI_PASSTHRU_DEVICE         6

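/* Producer/consumer descriptor ring shared with the device.  next2fill is
 * the next descriptor the driver will hand to the device, next2comp the
 * next one it expects the device to finish, and gen is the current
 * generation bit used to mark live descriptors (see the ring helpers below).
 */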
struct vmxnet3_cmd_ring {
        union Vmxnet3_GenericDesc *base;
        u32             size;
        u32             next2fill;
        u32             next2comp;
        u8              gen;
        dma_addr_t      basePA;
};

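/* Advance the fill (producer) index by one; on wrap-around back to slot 0
 * the ring's generation bit is flipped, which is how freshly filled
 * descriptors are told apart from ones left over from the previous pass.
 */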
static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
        ring->next2fill++;
        if (unlikely(ring->next2fill == ring->size)) {
                ring->next2fill = 0;
                VMXNET3_FLIP_RING_GEN(ring->gen);
        }
}

static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
        VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}

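/* Number of descriptors still available to fill.  One slot is always kept
 * unused (the "- 1") so that a completely full ring can be distinguished
 * from an empty one; the conditional term handles the wrap-around case
 * where next2comp is numerically ahead of next2fill.
 */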
static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
        return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
                ring->next2comp - ring->next2fill - 1;
}

struct vmxnet3_comp_ring {
        union Vmxnet3_GenericDesc *base;
        u32             size;
        u32             next2proc;
        u8              gen;
        u8              intr_idx;
        dma_addr_t      basePA;
};

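/* Advance the completion ring's consumer index, flipping its generation bit
 * on wrap-around, mirroring vmxnet3_cmd_ring_adv_next2fill() above.
 */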
static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
        ring->next2proc++;
        if (unlikely(ring->next2proc == ring->size)) {
                ring->next2proc = 0;
                VMXNET3_FLIP_RING_GEN(ring->gen);
        }
}

struct vmxnet3_tx_data_ring {
        struct Vmxnet3_TxDataDesc *base;
        u32             size;
        dma_addr_t      basePA;
};

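/* Records whether and how a tx buffer was DMA-mapped (single mapping, page
 * mapping, or not mapped at all), so it can be released the same way when
 * the transmit completes.
 */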
enum vmxnet3_buf_map_type {
        VMXNET3_MAP_INVALID = 0,
        VMXNET3_MAP_NONE,
        VMXNET3_MAP_SINGLE,
        VMXNET3_MAP_PAGE,
};

struct vmxnet3_tx_buf_info {
        u32             map_type;
        u16             len;
        u16             sop_idx;
        dma_addr_t      dma_addr;
        struct sk_buff  *skb;
};

struct vmxnet3_tq_driver_stats {
        u64             drop_total;     /* # of pkts dropped by the driver,
                                         * the counters below track droppings
                                         * due to different reasons
                                         */
        u64             drop_too_many_frags;
        u64             drop_oversized_hdr;
        u64             drop_hdr_inspect_err;
        u64             drop_tso;

        u64             tx_ring_full;
        u64             linearized;      /* # of pkts linearized */
        u64             copy_skb_header; /* # of times we have to copy skb header */
        u64             oversized_hdr;
};

struct vmxnet3_tx_ctx {
        bool    ipv4;
        bool    ipv6;
        u16     mss;
        u32     eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
                                  * offloading
                                  */
        u32     l4_hdr_size;     /* only valid if mss != 0 */
        u32     copy_size;       /* # of bytes copied into the data ring */
        union Vmxnet3_GenericDesc *sop_txd;
        union Vmxnet3_GenericDesc *eop_txd;
};

struct vmxnet3_tx_queue {
        char                            name[IFNAMSIZ+8]; /* To identify interrupt */
        struct vmxnet3_adapter          *adapter;
        spinlock_t                      tx_lock;
        struct vmxnet3_cmd_ring         tx_ring;
        struct vmxnet3_tx_buf_info      *buf_info;
        dma_addr_t                      buf_info_pa;
        struct vmxnet3_tx_data_ring     data_ring;
        struct vmxnet3_comp_ring        comp_ring;
        struct Vmxnet3_TxQueueCtrl      *shared;
        struct vmxnet3_tq_driver_stats  stats;
        bool                            stopped;
        int                             num_stop;  /* # of times the queue is
                                                    * stopped */
        int                             qid;
        u16                             txdata_desc_size;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

enum vmxnet3_rx_buf_type {
        VMXNET3_RX_BUF_NONE = 0,
        VMXNET3_RX_BUF_SKB = 1,
        VMXNET3_RX_BUF_PAGE = 2
};

struct vmxnet3_rx_buf_info {
        enum vmxnet3_rx_buf_type buf_type;
        u16     len;
        union {
                struct sk_buff *skb;
                struct page    *page;
        };
        dma_addr_t dma_addr;
};

struct vmxnet3_rx_ctx {
        struct sk_buff *skb;
        u32 sop_idx;
};

struct vmxnet3_rq_driver_stats {
        u64 drop_total;
        u64 drop_err;
        u64 drop_fcs;
        u64 rx_buf_alloc_failure;
};

struct vmxnet3_rx_data_ring {
        Vmxnet3_RxDataDesc *base;
        dma_addr_t basePA;
        u16 desc_size;
};

struct vmxnet3_rx_queue {
        char                            name[IFNAMSIZ + 8]; /* To identify interrupt */
        struct vmxnet3_adapter          *adapter;
        struct napi_struct              napi;
        struct vmxnet3_cmd_ring         rx_ring[2];
        struct vmxnet3_rx_data_ring     data_ring;
        struct vmxnet3_comp_ring        comp_ring;
        struct vmxnet3_rx_ctx           rx_ctx;
        u32 qid;            /* rqID in RCD for buffer from 1st ring */
        u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
        u32 dataRingQid;    /* rqID in RCD for buffer from data ring */
        struct vmxnet3_rx_buf_info      *buf_info[2];
        dma_addr_t                      buf_info_pa;
        struct Vmxnet3_RxQueueCtrl      *shared;
        struct vmxnet3_rq_driver_stats  stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE  (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)

#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
                                         VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */


struct vmxnet3_intr {
        enum vmxnet3_intr_mask_mode  mask_mode;
        enum vmxnet3_intr_type       type;      /* MSI-X, MSI, or INTx? */
        u8  num_intrs;                  /* # of intr vectors */
        u8  event_intr_idx;             /* idx of the intr vector for event */
        u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
        char    event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
        struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};

/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE 1       /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */


#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
struct vmxnet3_adapter {
        struct vmxnet3_tx_queue         tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
        struct vmxnet3_rx_queue         rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
        unsigned long                   active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct vmxnet3_intr             intr;
        spinlock_t                      cmd_lock;
        struct Vmxnet3_DriverShared     *shared;
        struct Vmxnet3_PMConf           *pm_conf;
        struct Vmxnet3_TxQueueDesc      *tqd_start;     /* all tx queue desc */
        struct Vmxnet3_RxQueueDesc      *rqd_start;     /* all rx queue desc */
        struct net_device               *netdev;
        struct pci_dev                  *pdev;

        u8                      __iomem *hw_addr0; /* for BAR 0 */
        u8                      __iomem *hw_addr1; /* for BAR 1 */
        u8                              version;

#ifdef VMXNET3_RSS
        struct UPT1_RSSConf             *rss_conf;
        bool                            rss;
#endif
        u32                             num_rx_queues;
        u32                             num_tx_queues;

        /* rx buffer related */
        unsigned                        skb_buf_size;
        int             rx_buf_per_pkt;  /* only applies to the 1st ring */
        dma_addr_t                      shared_pa;
        dma_addr_t queue_desc_pa;
        dma_addr_t coal_conf_pa;

        /* Wake-on-LAN */
        u32     wol;

        /* Link speed */
        u32     link_speed; /* in mbps */

        u64     tx_timeout_count;

        /* Ring sizes */
        u32 tx_ring_size;
        u32 rx_ring_size;
        u32 rx_ring2_size;

        /* Size of buffer in the data ring */
        u16 txdata_desc_size;
        u16 rxdata_desc_size;

        bool rxdataring_enabled;

        struct work_struct work;

        unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */

        int share_intr;

        struct Vmxnet3_CoalesceScheme *coal_conf;
        bool   default_coal_mode;

        dma_addr_t adapter_pa;
        dma_addr_t pm_conf_pa;
        dma_addr_t rss_conf_pa;
};

#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
        writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
        readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
        writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
        readl((adapter)->hw_addr1 + (reg))

#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
        ((rq)->rx_ring[ring_idx].size >> 3)

#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))

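/* adapter->version holds the negotiated device revision as a 1-based value,
 * while the VMXNET3_REV_x constants above are 0-based; hence the "+ 1" in
 * the comparisons below.
 */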
#define VMXNET3_VERSION_GE_2(adapter) \
        (adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
        (adapter->version >= VMXNET3_REV_3 + 1)

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    1024
#define VMXNET3_DEF_RX_RING2_SIZE   256

#define VMXNET3_DEF_RXDATA_DESC_SIZE 128

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

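/* A receive completion's rqID encodes both the queue and the ring it came
 * from: IDs in [0, num_rx_queues) refer to ring 0, [num_rx_queues,
 * 2 * num_rx_queues) to ring 1, and [2 * num_rx_queues, 3 * num_rx_queues)
 * to the rx data ring, as the two helpers below decode.
 */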
#define VMXNET3_GET_RING_IDX(adapter, rqID)     \
        ((rqID >= adapter->num_rx_queues &&     \
         rqID < 2 * adapter->num_rx_queues) ? 1 : 0)

#define VMXNET3_RX_DATA_RING(adapter, rqID)     \
        (rqID >= 2 * adapter->num_rx_queues &&  \
         rqID < 3 * adapter->num_rx_queues)

#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH       64

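/* Rate-based coalescing: convert between an interrupt rate in interrupts
 * per second and an interval in microseconds (e.g. 50 us <-> 20000 ints/s).
 */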
#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
                      u16 txdata_desc_size, u16 rxdata_desc_size);

void vmxnet3_set_ethtool_ops(struct net_device *netdev);

void vmxnet3_get_stats64(struct net_device *dev,
                         struct rtnl_link_stats64 *stats);

extern char vmxnet3_driver_name[];
#endif