/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};
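
/* Illustrative sketch, not part of the driver API: `pagecnt_bias` holds the
 * page refcount the driver expects to see when it is the sole owner of the
 * page. A hypothetical helper to test whether a buffer page can be reused
 * without copying (assumes page_count() from <linux/mm.h>, pulled in
 * transitively here) might look like:
 */
static inline bool gve_page_is_exclusive_sketch(const struct gve_rx_slot_page_info *page_info)
{
	/* Any extra reference means the networking stack still holds the page. */
	return page_count(page_info->page) == page_info->pagecnt_bias;
}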

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
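
/* Illustrative sketch (hypothetical helper, not a driver function): `head`,
 * `tail` and `mask` form a power-of-two ring, so the count of buffers posted
 * but not yet cleaned is simple modular arithmetic:
 */
static inline u32 gve_rx_bufq_posted_sketch(const struct gve_rx_buf_queue_dqo *bufq)
{
	/* Works across wrap-around because u32 subtraction is modular. */
	return (bufq->tail - bufq->head) & bufq->mask;
}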

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
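
/* Illustrative sketch, not the driver's poll loop: SW consumes descriptors at
 * `head` until it reaches one whose generation bit still matches
 * `cur_gen_bit`; the bit is assumed to flip each time `head` wraps. The
 * `generation` field name is an assumption about the completion descriptor
 * layout in gve_desc_dqo.h.
 */
static inline bool gve_rx_compl_ready_sketch(const struct gve_rx_compl_queue_dqo *complq)
{
	/* A differing generation bit marks a freshly written descriptor. */
	return complq->desc_ring[complq->head].generation != complq->cur_gen_bit;
}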

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
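
/* Illustrative sketch (hypothetical helper): these lists chain elements by
 * array index rather than by pointer, with -1 as the terminator. Popping the
 * head of a list backed by an array of buffer states could look like:
 */
static inline s16 gve_index_list_pop_sketch(struct gve_index_list *list,
					    struct gve_rx_buf_state_dqo *buf_states)
{
	s16 idx = list->head;

	if (idx == -1)
		return -1;
	/* Advance head along the index chain; empty lists reset tail too. */
	list->head = buf_states[idx].next;
	if (list->head == -1)
		list->tail = -1;
	return idx;
}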

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u16 total_expected_size;
	u8 expected_frag_cnt;
	u8 curr_frag_cnt;
	u8 reuse_frags;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};
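
/* Illustrative sketch, not a driver function: on 32-bit architectures the
 * u64 counters above cannot be read atomically, so writers bracket updates
 * with the `statss` sequence counter (see <linux/u64_stats_sync.h>):
 */
static inline void gve_rx_count_packet_sketch(struct gve_rx_ring *rx, u32 len)
{
	u64_stats_update_begin(&rx->statss);
	rx->rpackets++;
	rx->rbytes += len;
	u64_stats_update_end(&rx->statss);
}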

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
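
/* Illustrative sketch (hypothetical helper): `available` is atomic because
 * the xmit path consumes FIFO space while the completion path returns it.
 * A space check before copying an skb into the FIFO might be:
 */
static inline bool gve_tx_fifo_has_room_sketch(struct gve_tx_fifo *fifo, int bytes)
{
	return atomic_read(&fifo->available) >= bytes;
}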

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
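
/* Illustrative sketch, not the driver's completion path: tearing down the
 * mappings of a completed packet follows the convention documented above,
 * dma_unmap_single() for buffer 0 (the skb linear area) and dma_unmap_page()
 * for the frags:
 */
static inline void gve_unmap_packet_sketch(struct device *dev,
					   struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}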

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup is in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1,
	GVE_GQI_QPL_FORMAT = 0x2,
	GVE_DQO_RDA_FORMAT = 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */