/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO	2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
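
/* Illustrative sketch only (not part of this header's API): a completion
 * poller typically consumes descriptors until the generation bit matches
 * cur_gen_bit again, toggling cur_gen_bit each time the ring wraps. The
 * field name `generation` below is assumed for illustration; the real
 * descriptor layout lives in gve_desc_dqo.h.
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;					// nothing new from HW
 *	// ... process the completion ...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;		// ring wrapped
 */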

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
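
/* Illustrative sketch only: these s16-index lists implement intrusive
 * linked lists over a backing array (e.g. buf_states or pending_packets),
 * so "pointers" are array indices and -1 plays the role of NULL. Popping
 * the head element looks roughly like:
 *
 *	s16 idx = list->head;
 *	if (idx == -1)
 *		return NULL;			// list is empty
 *	elem = &array[idx];
 *	list->head = elem->next;		// advance the head
 *	if (list->head == -1)
 *		list->tail = -1;		// list became empty
 */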

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u16 total_expected_size;
	u8 expected_frag_cnt;
	u8 curr_frag_cnt;
	u8 reuse_frags;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
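
/* Typical lifecycle (descriptive only, derived from the states above):
 * UNALLOCATED -> PENDING_DATA_COMPL on transmit; a regular data completion
 * releases the packet back to UNALLOCATED, while a miss completion moves it
 * to PENDING_REINJECT_COMPL to wait for the re-injection completion. If that
 * completion never arrives within the timeout, the packet is treated as
 * TIMED_OUT_COMPL and freed by the cleanup path.
 */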

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
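
/* Illustrative sketch only: the completion path would typically arm and
 * check this deadline with the standard jiffies helpers, e.g.:
 *
 *	pending->timeout_jiffies = jiffies + GVE_REINJECT_COMPL_TIMEOUT;
 *	...
 *	if (time_after(jiffies, pending->timeout_jiffies))
 *		// handle as GVE_PACKET_STATE_TIMED_OUT_COMPL
 *
 * GVE_REINJECT_COMPL_TIMEOUT is a hypothetical name used only for this
 * example; the real constant is defined in the DQO TX code.
 */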

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of per AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
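
/* Illustrative sketch only: callers ack/unmask the block's interrupt by
 * writing a big-endian value to the returned doorbell address, e.g.:
 *
 *	iowrite32be(irq_doorbell_value, gve_irq_doorbell(priv, block));
 *
 * irq_doorbell_value is a placeholder name here; the actual mask/ack
 * encoding is device-defined and used in the driver's interrupt paths.
 */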

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */