gve: Add support for raw addressing to the rx path
Add support for using raw DMA addresses in the rx path. With this
support we can allocate a new buffer instead of making a copy.
RX buffers are handed to the networking stack and are
re-allocated as needed, avoiding the need to use
skb_copy_to_linear_data() as in "qpl" mode.
Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
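As an illustration of the buffer re-allocation described above (not part of
the patch, and with a hypothetical helper name and parameters): in raw
addressing mode a consumed buffer can be replaced by allocating and DMA
mapping a fresh page and writing its raw address back into the data slot,
instead of copying the payload out of a QPL page.

static int gve_rx_replace_buffer(struct device *dev,
				 struct gve_rx_slot_page_info *page_info,
				 union gve_rx_data_slot *data_slot)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(GFP_ATOMIC);
	if (!page)
		return -ENOMEM;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		__free_page(page);
		return -ENOMEM;
	}

	/* Track the new page and hand its raw DMA address to the NIC. */
	page_info->page = page;
	page_info->page_address = page_address(page);
	page_info->page_offset = 0;
	data_slot->addr = cpu_to_be64(dma);
	return 0;
}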
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 782e279..d8bba0b 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -38,6 +38,8 @@
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4
+#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
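Aside (not part of the patch): GVE_DATA_SLOT_ADDR_PAGE_MASK presumably lets
the driver recover the page-aligned DMA mapping from a slot address whose
low bits carry the offset within the page, roughly like this hypothetical
helper:

static dma_addr_t gve_rx_slot_dma_addr(__be64 slot_addr)
{
	/* Strip the in-page offset to get back the page-aligned mapping. */
	return be64_to_cpu(slot_addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK;
}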
@@ -49,7 +51,7 @@ struct gve_rx_desc_queue {
struct gve_rx_slot_page_info {
struct page *page;
void *page_address;
- u32 page_offset; /* offset to write to in page */
+ u8 page_offset; /* flipped to second half? */
};
/* A list of pages registered with the device during setup and used by a queue
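Aside (not part of the patch): with page_offset reduced to a flag, flipping
between the two half-page buffers of a page might look roughly like the
sketch below; the helper name and exact form are assumptions.

static void gve_rx_flip_buffer(struct gve_rx_slot_page_info *page_info,
			       __be64 *slot_addr)
{
	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);

	/* Toggle the half-page flag and point the slot (QPL offset or raw
	 * DMA address) at the other half of the page.
	 */
	page_info->page_offset ^= 0x1;
	*slot_addr ^= offset;
}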
@@ -64,10 +66,11 @@ struct gve_queue_page_list {
/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
- struct gve_rx_data_slot *data_ring; /* read by NIC */
+ union gve_rx_data_slot *data_ring; /* read by NIC */
dma_addr_t data_bus; /* dma mapping of the slots */
struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
+ u8 raw_addressing; /* use raw_addressing? */
};
struct gve_priv;
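Aside (not part of this hunk): data_ring now holds union gve_rx_data_slot
entries. The union itself is defined elsewhere in the driver; conceptually
each slot carries either a QPL offset or a raw DMA address, along these
lines:

union gve_rx_data_slot {
	__be64 qpl_offset;	/* "qpl" mode: offset into the registered QPL */
	__be64 addr;		/* raw addressing mode: DMA address of the buffer */
};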
@@ -82,6 +85,7 @@ struct gve_rx_ring {
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u32 db_threshold; /* threshold for posting new buffs and descs */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
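Aside (not part of the patch): a rough sketch of how db_threshold could gate
buffer posting in raw addressing mode. Rather than refilling after every
completed descriptor as in "qpl" mode, new buffers are only allocated and
the doorbell rung once the count of outstanding buffers drops to the
threshold. Both helper names below are illustrative.

static void gve_rx_maybe_restock(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	/* fill_cnt and cnt are the free-running counters from
	 * struct gve_rx_ring above.
	 */
	if (rx->fill_cnt - rx->cnt > rx->db_threshold)
		return;

	if (gve_rx_refill_buffers(priv, rx))
		gve_rx_write_doorbell(priv, rx);
}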
@@ -194,7 +198,7 @@ struct gve_priv {
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
u16 tx_pages_per_qpl; /* tx buffer length */
- u16 rx_pages_per_qpl; /* rx buffer length */
+ u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
u32 rx_copybreak; /* copy packets smaller than this */
@@ -444,7 +448,7 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
*/
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
- return priv->rx_cfg.num_queues;
+ return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
}
/* Returns a pointer to the next available tx qpl in the list of qpls