/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include "gve_desc.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	struct gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
};

struct gve_priv;

/* An RX ring that contains a power-of-two sized desc and data ring. */
struct gve_rx_ring {
	struct gve_priv *gve;
	struct gve_rx_desc_queue desc;
	struct gve_rx_data_queue data;
	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
};
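
/* Illustrative sketch, not part of the driver API: because the rings are
 * power-of-two sized, the free-running cnt and fill_cnt counters are
 * reduced to ring slots with the mask rather than a modulo. The helper
 * name below is hypothetical.
 */
static inline u32 gve_rx_example_slot_idx(const struct gve_rx_ring *rx)
{
	return rx->cnt & rx->mask; /* slot of the next desc to complete */
}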

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry, but only used for a pkt_desc, not a seg_desc.
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
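
/* Illustrative sketch, not part of the driver API: 'available' is atomic
 * because the transmit path consumes FIFO space while the completion path
 * returns it. A hypothetical space check could look like this:
 */
static inline bool gve_tx_fifo_example_can_fit(struct gve_tx_fifo *fifo,
					       u32 bytes)
{
	return (u32)atomic_read(&fifo->available) >= bytes;
}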

/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	struct gve_tx_fifo tx_fifo;
	u32 req; /* driver tracked head pointer */
	u32 done; /* driver tracked tail pointer */

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	__be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */

	/* Cacheline 2 -- Read-mostly fields */
	union gve_tx_desc *desc ____cacheline_aligned;
	struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	u32 mask; /* masks req and done down to queue size */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 ntfy_id; /* notification block index */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
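
/* Illustrative sketch, not part of the driver API: req and done are
 * free-running u32 counters, so their difference is the number of
 * descriptors the NIC still owns, even across wraparound; masking either
 * counter yields a ring slot. The helper name is hypothetical.
 */
static inline u32 gve_tx_example_outstanding(const struct gve_tx_ring *tx)
{
	return tx->req - tx->done; /* well-defined under u32 wraparound */
}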

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into BAR2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_pages_per_qpl; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */

	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	unsigned long service_task_flags;
	unsigned long state_flags;
};

enum gve_service_task_flags {
	GVE_PRIV_FLAGS_DO_RESET = BIT(1),
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = BIT(2),
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = BIT(3),
};

enum gve_state_flags {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = BIT(1),
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = BIT(2),
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = BIT(3),
	GVE_PRIV_FLAGS_NAPI_ENABLED = BIT(4),
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

/* Returns the address of the ntfy_block's irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}
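
/* Illustrative usage sketch: an interrupt handler or NAPI completion path
 * might ack/re-arm a block by writing a device-defined value through this
 * pointer. The value 0 below is a placeholder, not a documented constant:
 *
 *	iowrite32be(0, gve_irq_doorbell(priv, block));
 */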

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
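
/* Example of the mapping above: with num_ntfy_blks == 8, tx queues 0-3 map
 * to notify blocks 0-3 and rx queues 0-3 map to blocks 4-7. The bottom half
 * of the block array serves tx and the top half serves rx, which is why
 * num_ntfy_blks must be even.
 */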

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == priv->qpl_cfg.qpl_map_size)
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
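
/* Illustrative lifecycle sketch (hypothetical caller, not driver code):
 * tx QPLs use ids [0, gve_num_tx_qpls) and rx QPLs use the ids above them,
 * so assignment is just a bitmap search over the right range.
 *
 *	struct gve_queue_page_list *qpl = gve_assign_tx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;	// all tx qpl ids are in use
 *	// ... use qpl->pages / qpl->page_buses as the ring's buffers ...
 *	gve_unassign_qpl(priv, qpl->id);
 */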

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
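
/* Illustrative sketch, not part of the driver: a setup path could map one
 * page of a QPL with the direction chosen above. The helper name is
 * hypothetical; dma_map_page() and dma_mapping_error() are the standard
 * kernel DMA API from <linux/dma-mapping.h>, already included above.
 */
static inline int gve_example_map_qpl_page(struct device *dev,
					   struct gve_priv *priv,
					   struct gve_queue_page_list *qpl,
					   int i)
{
	qpl->page_buses[i] = dma_map_page(dev, qpl->pages[i], 0, PAGE_SIZE,
					  gve_qpl_dma_dir(priv, qpl->id));
	return dma_mapping_error(dev, qpl->page_buses[i]) ? -ENOMEM : 0;
}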

/* Returns true if the max mtu allows page recycling */
static inline bool gve_can_recycle_pages(struct net_device *dev)
{
	/* We can't recycle the pages if we can't fit a packet into half a
	 * page.
	 */
	return dev->max_mtu <= PAGE_SIZE / 2;
}
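
/* Example: with 4096-byte pages, recycling is only possible when
 * max_mtu <= 2048, so that two receive buffers fit in a single page.
 */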

/* buffers */
int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */