/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#if IS_ENABLED(CONFIG_DCB)
#include <scsi/iscsi_proto.h>
#endif /* CONFIG_DCB */
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"

#define ICE_BAR0		0
#define ICE_REQ_DESC_MULTIPLE	32
#define ICE_MIN_NUM_DESC	64
#define ICE_MAX_NUM_DESC	8160
#define ICE_DFLT_MIN_RX_DESC	512
#define ICE_DFLT_NUM_TX_DESC	256
#define ICE_DFLT_NUM_RX_DESC	2048

#define ICE_DFLT_TRAFFIC_CLASS	BIT(0)
#define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
#define ICE_AQ_LEN		192
#define ICE_MBXSQ_LEN		64
#define ICE_SBQ_LEN		64
#define ICE_MIN_LAN_TXRX_MSIX	1
#define ICE_MIN_LAN_OICR_MSIX	1
#define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX		2
#define ICE_RDMA_NUM_AEQ_MSIX	4
#define ICE_MIN_RDMA_MSIX	2
#define ICE_ESWITCH_MSIX	1
#define ICE_NO_VSI		0xffff
#define ICE_VSI_MAP_CONTIG	0
#define ICE_VSI_MAP_SCATTER	1
#define ICE_MAX_SCATTER_TXQS	16
#define ICE_MAX_SCATTER_RXQS	16
#define ICE_Q_WAIT_RETRY_LIMIT	10
#define ICE_Q_WAIT_MAX_RETRY	(5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS	256
#define ICE_RES_VALID_BIT	0x8000
#define ICE_RES_MISC_VEC_ID	(ICE_RES_VALID_BIT - 1)
#define ICE_RES_RDMA_VEC_ID	(ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID	(ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX	0xffff
#define ICE_INVAL_VFID		256

#define ICE_MAX_RXQS_PER_TC	256	/* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC	1

#define ICE_MAX_RESET_WAIT	20

#define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_UP_TABLE_TRANSLATE(val, i) \
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
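
/* Illustrative (hypothetical) usage of ICE_UP_TABLE_TRANSLATE: build a
 * 32-bit VSI user-priority table in which every UP maps to value 0 by
 * OR-ing one entry per UP:
 *
 *	u32 up_table = ICE_UP_TABLE_TRANSLATE(0, 0) |
 *		       ICE_UP_TABLE_TRANSLATE(0, 1) |
 *		       ICE_UP_TABLE_TRANSLATE(0, 2);
 *	(and so on for the remaining user priorities)
 */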

#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT		500
/* User can specify BW in either Kbit/Mbit/Gbit and the OS converts it into
 * bytes. Use this divisor to convert the user-specified BW limit into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR		125
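/* Example with hypothetical numbers: a user-requested limit of 1 Mbit/s
 * arrives from the stack as 125000 bytes/s; 125000 / ICE_BW_KBPS_DIVISOR =
 * 1000 Kbps, which can then be checked against ICE_MIN_BW_LIMIT above.
 */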

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
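
/* Illustrative usage sketch (pf->vsi entries may be NULL, see struct ice_pf
 * below):
 *
 *	ice_for_each_vsi(pf, i) {
 *		struct ice_vsi *vsi = pf->vsi[i];
 *
 *		if (!vsi)
 *			continue;
 *		... operate on vsi ...
 *	}
 */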

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i)	\
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)

#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
				     ICE_PROMISC_UCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
				     ICE_PROMISC_MCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

enum ice_feature {
	ICE_F_DSCP,
	ICE_F_SMA_CTRL,
	ICE_F_MAX
};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {
	struct list_head list;
	u8 type;
	u16 sw_id;
	u16 base_q;
	u16 num_rxq;
	u16 num_txq;
	u16 vsi_num;
	u8 ena_tc;
	struct ice_aqc_vsi_props info;
	u64 max_tx_rate;
	u64 min_tx_rate;
	atomic_t num_sb_fltr;
	struct ice_vsi *ch_vsi;
};

struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};

struct ice_tc_info {
	u16 qoffset;
	u16 qcount_tx;
	u16 qcount_rx;
	u8 netdev_tc;
};

struct ice_tc_cfg {
	u8 numtc; /* Total number of enabled TCs */
	u16 ena_tc; /* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

struct ice_res_tracker {
	u16 num_entries;
	u16 end;
	u16 list[];
};

struct ice_qs_cfg {
	struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
	unsigned long *pf_map;
	unsigned long pf_map_size;
	unsigned int q_count;
	unsigned int scatter_count;
	u16 *vsi_map;
	u16 vsi_map_offset;
	u8 mapping_mode;
};

struct ice_sw {
	struct ice_pf *pf;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
	struct ice_vsi *dflt_vsi;	/* default VSI for this switch */
	u8 dflt_vsi_ena:1;	/* true if above dflt_vsi is enabled */
};

enum ice_pf_state {
	ICE_TESTING,
	ICE_DOWN,
	ICE_NEEDS_RESTART,
	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
	ICE_RESET_OICR_RECV,	/* set by driver after rcv reset OICR */
	ICE_PFR_REQ,		/* set by driver */
	ICE_CORER_REQ,		/* set by driver */
	ICE_GLOBR_REQ,		/* set by driver */
	ICE_CORER_RECV,		/* set by OICR handler */
	ICE_GLOBR_RECV,		/* set by OICR handler */
	ICE_EMPR_RECV,		/* set by OICR handler */
	ICE_SUSPENDED,		/* set on module remove path */
	ICE_RESET_FAILED,	/* set by reset/rebuild */
	/* When checking for the PF to be in a nominal operating state, the
	 * bits that are grouped at the beginning of the list need to be
	 * checked. Only bits occurring before ICE_STATE_NOMINAL_CHECK_BITS
	 * are checked. If a bit needs to be considered for the nominal
	 * operating state, it must be added before
	 * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
	 * without appropriate consideration.
	 */
	ICE_STATE_NOMINAL_CHECK_BITS,
	ICE_ADMINQ_EVENT_PENDING,
	ICE_MAILBOXQ_EVENT_PENDING,
	ICE_SIDEBANDQ_EVENT_PENDING,
	ICE_MDD_EVENT_PENDING,
	ICE_VFLR_EVENT_PENDING,
	ICE_FLTR_OVERFLOW_PROMISC,
	ICE_VF_DIS,
	ICE_VF_DEINIT_IN_PROGRESS,
	ICE_CFG_BUSY,
	ICE_SERVICE_SCHED,
	ICE_SERVICE_DIS,
	ICE_FD_FLUSH_REQ,
	ICE_OICR_INTR_DIS,		/* Global OICR interrupt disabled */
	ICE_MDD_VF_PRINT_PENDING,	/* set when MDD event handle */
	ICE_VF_RESETS_DISABLED,		/* disable resets during ice_remove */
	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_PHY_INIT_COMPLETE,
	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
	ICE_STATE_NBITS			/* must be last */
};

enum ice_vsi_state {
	ICE_VSI_DOWN,
	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_VLAN_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_STATE_NBITS		/* must be last */
};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	u64 tx_linearize;
	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	u32 tx_restart;
	u32 tx_busy;
	u32 rx_buf_failed;
	u32 rx_page_failed;
	u16 num_q_vectors;
	u16 base_vector;		/* IRQ base for OS reserved vectors */
	enum ice_vsi_type type;
	u16 vsi_num;			/* HW (absolute) index of this VSI */
	u16 idx;			/* software index in pf->vsi[] */

	s16 vf_id;			/* VF ID for SR-IOV VSIs */

	u16 ethtype;			/* Ethernet protocol for pause frame */
	u16 num_gfltr;
	u16 num_bfltr;

	/* RSS config */
	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST	1024
#define ICE_ARFS_LST_MASK	(ICE_MAX_ARFS_LIST - 1)
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	u16 max_frame;
	u16 rx_buf_len;

	struct ice_aqc_vsi_props info;	 /* VSI properties */

	/* VSI stats */
	struct rtnl_link_stats64 net_stats;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */

	u8 irqs_ready:1;
	u8 current_isup:1;		 /* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	u16 num_vlan;

	/* queue information */
	u8 tx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;			 /* index in pf->avail_txqs */
	u16 *rxq_map;			 /* index in pf->avail_rxqs */
	u16 alloc_txq;			 /* Allocated Tx queues */
	u16 num_txq;			 /* Used Tx queues */
	u16 alloc_rxq;			 /* Allocated Rx queues */
	u16 num_rxq;			 /* Used Rx queues */
	u16 req_txq;			 /* User requested Tx queues */
	u16 req_rxq;			 /* User requested Rx queues */
	u16 num_rx_desc;
	u16 num_tx_desc;
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
	u16 num_xdp_txq;		 /* Used XDP queues */
	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 cnt_q_avail;
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	u16 num_chnl_rxq;
	u16 num_chnl_txq;
	u16 ch_rss_size;
	u16 num_chnl_fltr;
	/* store away RSS size info before configuring ADQ channels so that
	 * it can be used after tc-qdisc delete, to restore the RSS settings
	 * as they were before
	 */
	u16 orig_rss_size;
	/* this keeps track of all enabled TCs with and without DCB and
	 * inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
	 * information
	 */
	u8 all_numtc;
	u16 all_enatc;

	/* store away TC info, to be used for rebuild logic */
	u8 old_numtc;
	u16 old_ena_tc;

	struct ice_channel *ch;

	/* back reference to the aggregator node this VSI corresponds to */
	struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;

	u16 v_idx;		/* index in the vsi->q_vector array. */
	u16 reg_idx;
	u8 num_ring_rx;		/* total number of Rx rings in vector */
	u8 num_ring_tx;		/* total number of Tx rings in vector */
	u8 wb_on_itr:1;		/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
	ICE_FLAG_FLTR_SYNC,
	ICE_FLAG_RDMA_ENA,
	ICE_FLAG_RSS_ENA,
	ICE_FLAG_SRIOV_ENA,
	ICE_FLAG_SRIOV_CAPABLE,
	ICE_FLAG_DCB_CAPABLE,
	ICE_FLAG_DCB_ENA,
	ICE_FLAG_FD_ENA,
	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
	ICE_FLAG_PTP,			/* PTP is enabled by software */
	ICE_FLAG_AUX_ENA,
	ICE_FLAG_ADV_FEATURES,
	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
	ICE_FLAG_CLS_FLOWER,
	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
	ICE_FLAG_NO_MEDIA,
	ICE_FLAG_FW_LLDP_AGENT,
	ICE_FLAG_MOD_POWER_UNSUPPORTED,
	ICE_FLAG_PHY_FW_LOAD_FAILED,
	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
	ICE_FLAG_LEGACY_RX,
	ICE_FLAG_VF_TRUE_PROMISC_ENA,
	ICE_FLAG_MDD_AUTO_RESET_VF,
	ICE_FLAG_LINK_LENIENT_MODE_ENA,
	ICE_FLAG_PLUG_AUX_DEV,
	ICE_PF_FLAGS_NBITS		/* must be last */
};

struct ice_switchdev_info {
	struct ice_vsi *control_vsi;
	struct ice_vsi *uplink_vsi;
	bool is_running;
};

struct ice_agg_node {
	u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE	64
	u32 num_vsis;
	u8 valid;
};

struct ice_pf {
	struct pci_dev *pdev;

	struct devlink_region *nvm_region;
	struct devlink_region *sram_region;
	struct devlink_region *devcaps_region;

	/* devlink port data */
	struct devlink_port devlink_port;

	/* OS reserved IRQ details */
	struct msix_entry *msix_entries;
	struct ice_res_tracker *irq_tracker;
	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
	 * MSIX vectors allowed on this PF.
	 */
	u16 sriov_base_vector;

	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */

	struct ice_vsi **vsi;		/* VSIs created by the driver */
	struct ice_sw *first_sw;	/* first switch created by firmware */
	u16 eswitch_mode;		/* current mode of eswitch */
	/* Virtchnl/SR-IOV config info */
	struct ice_vf *vf;
	u16 num_alloc_vfs;		/* actual number of VFs allocated */
	u16 num_vfs_supported;		/* num VFs supported for this PF */
	u16 num_qps_per_vf;
	u16 num_msix_per_vf;
	/* used to ratelimit the MDD event logging */
	unsigned long last_printed_mdd_jiffies;
	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
	DECLARE_BITMAP(features, ICE_F_MAX);
	DECLARE_BITMAP(state, ICE_STATE_NBITS);
	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
	unsigned long serv_tmr_period;
	unsigned long serv_tmr_prev;
	struct timer_list serv_tmr;
	struct work_struct serv_task;
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
	u32 msg_enable;
	struct ice_ptp ptp;
	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
	u16 rdma_base_vector;

	/* spinlock to protect the AdminQ wait list */
	spinlock_t aq_wait_lock;
	struct hlist_head aq_wait_list;
	wait_queue_head_t aq_wait_queue;
	bool fw_emp_reset_disabled;

	wait_queue_head_t reset_wait_queue;

	u32 hw_csum_rx_error;
	u16 oicr_idx;		/* Other interrupt cause MSIX vector index */
	u16 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
	u16 max_pf_txqs;	/* Total Tx queues PF wide */
	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
	u16 num_lan_tx;		/* num LAN Tx queues setup */
	u16 num_lan_rx;		/* num LAN Rx queues setup */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;	/* Core reset count */
	u16 globr_count;	/* Global reset count */
	u16 empr_count;		/* EMP reset count */
	u16 pfr_count;		/* PF reset count */

	u8 wol_ena : 1;		/* software state of WoL */
	u32 wakeup_reason;	/* last wakeup reason */
	struct ice_hw_port_stats stats;
	struct ice_hw_port_stats stats_prev;
	struct ice_hw hw;
	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
	u8 rdma_mode;
	u16 dcbx_cap;
	u32 tx_timeout_count;
	unsigned long tx_timeout_last_recovery;
	u32 tx_timeout_recovery_level;
	char int_name[ICE_INT_NAME_STR_LEN];
	struct auxiliary_device *adev;
	int aux_idx;
	u32 sw_int_count;
	/* count of tc_flower filters specific to channel (aka where filter
	 * action is "hw_tc <tc_num>")
	 */
	u16 num_dmac_chnl_fltrs;
	struct hlist_head tc_flower_fltr_list;

	__le64 nvm_phy_type_lo; /* NVM PHY type low */
	__le64 nvm_phy_type_hi; /* NVM PHY type high */
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag; /* Link Aggregation information */

	struct ice_switchdev_info switchdev;

#define ICE_INVALID_AGG_NODE_ID		0
#define ICE_PF_AGG_NODE_ID_START	1
#define ICE_MAX_PF_AGG_NODES		32
	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START	65
#define ICE_MAX_VF_AGG_NODES		32
	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};

struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_cb_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};

/**
 * ice_vector_ch_enabled - check if the given vector is channel enabled
 * @qv: pointer to q_vector, can be NULL
 *
 * This function returns true if the vector is channel enabled, false otherwise
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
	return !!qv->ch; /* Enable it to run with TC */
}

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_idx;
	int itr = ICE_ITR_NONE;
	u32 val;

	/* clear the PBA here, as this function is meant to clean out all
	 * previous interrupts and enable the interrupt
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);
	if (vsi)
		if (test_bit(ICE_VSI_DOWN, vsi->state))
			return;
	wr32(hw, GLINT_DYN_CTL(vector), val);
}

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
	return !!vsi->xdp_prog;
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
	ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to the xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}

/**
 * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Tx ring to use
 *
 * Returns a pointer to the xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise. Tx equivalent of ice_xsk_pool.
 */
static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid;

	qid = ring->q_index - vsi->num_xdp_txq;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
	if (pf->vsi)
		return pf->vsi[0];

	return NULL;
}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
	/* In case of port representor return source port VSI. */
	if (np->repr)
		return np->repr->src_vsi;
	else
		return np->vsi;
}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[pf->ctrl_vsi_idx];
}

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->switchdev.is_running;
}

/**
 * ice_set_sriov_cap - enable SRIOV in PF flags
 * @pf: PF struct
 */
static inline void ice_set_sriov_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.sr_iov_1_1)
		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}

/**
 * ice_clear_sriov_cap - disable SRIOV in PF flags
 * @pf: PF struct
 */
static inline void ice_clear_sriov_cap(struct ice_pf *pf)
{
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT	256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH			1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
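
/* Example with a hypothetical base_idx of 2: the PF's flow director stat
 * counters start at 2 * ICE_FD_STAT_CTR_BLOCK_COUNT = 512, sideband filters
 * report against index 512, and the ADQ channel counter sits at 512 + 1 = 513.
 */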

/**
 * ice_is_adq_active - any active ADQs
 * @pf: pointer to PF
 *
 * This function returns true if any ADQs are configured (determined by
 * looking at the VSI type, which should be VSI_PF, the numtc value, and the
 * TC_MQPRIO flag); otherwise it returns false
| 807 | */ |
| 808 | static inline bool ice_is_adq_active(struct ice_pf *pf) |
| 809 | { |
| 810 | struct ice_vsi *vsi; |
| 811 | |
| 812 | vsi = ice_get_main_vsi(pf); |
| 813 | if (!vsi) |
| 814 | return false; |
| 815 | |
| 816 | /* is ADQ configured */ |
| 817 | if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC && |
| 818 | test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) |
| 819 | return true; |
| 820 | |
| 821 | return false; |
| 822 | } |
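/*
 * Hypothetical caller sketch (the real call sites live in the .c files):
 * a path that cannot run while ADQ is configured might simply bail out:
 *
 *	if (ice_is_adq_active(pf))
 *		return -EOPNOTSUPP;
 */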
| 823 | |
Dave Ertman | df006dd | 2020-11-20 16:39:26 -0800 | [diff] [blame] | 824 | bool netif_is_ice(struct net_device *dev); |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 825 | int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); |
| 826 | int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 827 | int ice_vsi_open_ctrl(struct ice_vsi *vsi); |
Grzegorz Nitka | 1a1c40d | 2021-08-19 17:08:54 -0700 | [diff] [blame] | 828 | int ice_vsi_open(struct ice_vsi *vsi); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 829 | void ice_set_ethtool_ops(struct net_device *netdev); |
Wojciech Drewek | 7aae80c | 2021-08-19 17:08:59 -0700 | [diff] [blame] | 830 | void ice_set_ethtool_repr_ops(struct net_device *netdev); |
Tony Nguyen | 462acf6 | 2019-09-09 06:47:46 -0700 | [diff] [blame] | 831 | void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); |
Anirudh Venkataramanan | 8c24370 | 2019-09-03 01:31:06 -0700 | [diff] [blame] | 832 | u16 ice_get_avail_txq_count(struct ice_pf *pf); |
| 833 | u16 ice_get_avail_rxq_count(struct ice_pf *pf); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 834 | int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); |
Bruce Allan | 5a4a867 | 2019-07-25 02:53:50 -0700 | [diff] [blame] | 835 | void ice_update_vsi_stats(struct ice_vsi *vsi); |
| 836 | void ice_update_pf_stats(struct ice_pf *pf); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 837 | int ice_up(struct ice_vsi *vsi); |
| 838 | int ice_down(struct ice_vsi *vsi); |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 839 | int ice_vsi_cfg(struct ice_vsi *vsi); |
| 840 | struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); |
Maciej Fijalkowski | 22bf877 | 2021-08-19 14:00:03 +0200 | [diff] [blame] | 841 | int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); |
Maciej Fijalkowski | efc2214 | 2019-11-04 09:38:56 -0800 | [diff] [blame] | 842 | int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); |
| 843 | int ice_destroy_xdp_rings(struct ice_vsi *vsi); |
| 844 | int |
| 845 | ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, |
| 846 | u32 flags); |
Brett Creeley | b66a972 | 2021-03-02 10:15:36 -0800 | [diff] [blame] | 847 | int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); |
| 848 | int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); |
| 849 | int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); |
| 850 | int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 851 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); |
Henry Tieman | 87324e7 | 2019-11-08 06:23:29 -0800 | [diff] [blame] | 852 | int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); |
Anirudh Venkataramanan | fcea6f3 | 2018-03-20 07:58:16 -0700 | [diff] [blame] | 853 | void ice_print_link_msg(struct ice_vsi *vsi, bool isup); |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 854 | int ice_plug_aux_dev(struct ice_pf *pf); |
| 855 | void ice_unplug_aux_dev(struct ice_pf *pf); |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 856 | int ice_init_rdma(struct ice_pf *pf); |
Lihong Yang | 0fee357 | 2020-05-07 17:41:04 -0700 | [diff] [blame] | 857 | const char *ice_aq_str(enum ice_aq_err aq_err); |
Anirudh Venkataramanan | 3176551 | 2021-02-26 13:19:30 -0800 | [diff] [blame] | 858 | bool ice_is_wol_supported(struct ice_hw *hw); |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 859 | void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 860 | int |
| 861 | ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, |
| 862 | bool is_tun); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 863 | void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena); |
Henry Tieman | cac2a27 | 2020-05-11 18:01:42 -0700 | [diff] [blame] | 864 | int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); |
| 865 | int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); |
Henry Tieman | 4ab9564 | 2020-05-11 18:01:41 -0700 | [diff] [blame] | 866 | int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd); |
| 867 | int |
| 868 | ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, |
| 869 | u32 *rule_locs); |
Kiran Patil | 4031979 | 2021-12-29 10:54:33 -0800 | [diff] [blame] | 870 | void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 871 | void ice_fdir_release_flows(struct ice_hw *hw); |
Henry Tieman | 83af003 | 2020-05-11 18:01:45 -0700 | [diff] [blame] | 872 | void ice_fdir_replay_flows(struct ice_hw *hw); |
| 873 | void ice_fdir_replay_fltrs(struct ice_pf *pf); |
Henry Tieman | 148beb6 | 2020-05-11 18:01:40 -0700 | [diff] [blame] | 874 | int ice_fdir_create_dflt_rules(struct ice_pf *pf); |
Jacob Keller | d69ea41 | 2020-07-23 17:22:03 -0700 | [diff] [blame] | 875 | int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, |
| 876 | struct ice_rq_event_info *event); |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 877 | int ice_open(struct net_device *netdev); |
Krzysztof Goreczny | e95fc85 | 2021-02-26 13:19:26 -0800 | [diff] [blame] | 878 | int ice_open_internal(struct net_device *netdev); |
Anirudh Venkataramanan | 0e674ae | 2019-04-16 10:30:43 -0700 | [diff] [blame] | 879 | int ice_stop(struct net_device *netdev); |
Brett Creeley | 28bf267 | 2020-05-11 18:01:46 -0700 | [diff] [blame] | 880 | void ice_service_task_schedule(struct ice_pf *pf); |
Anirudh Venkataramanan | d76a60b | 2018-03-20 07:58:15 -0700 | [diff] [blame] | 881 | |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 882 | /** |
| 883 | * ice_set_rdma_cap - enable RDMA support |
| 884 | * @pf: PF struct |
| 885 | */ |
| 886 | static inline void ice_set_rdma_cap(struct ice_pf *pf) |
| 887 | { |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 888 | if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 889 | set_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
Dave Ertman | bfe8443 | 2021-09-09 08:12:23 -0700 | [diff] [blame] | 890 | set_bit(ICE_FLAG_AUX_ENA, pf->flags); |
Dave Ertman | 5dbbbd0 | 2022-01-20 16:27:56 -0800 | [diff] [blame] | 891 | set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 892 | } |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 893 | } |
| 894 | |
| 895 | /** |
| 896 | * ice_clear_rdma_cap - disable RDMA support |
| 897 | * @pf: PF struct |
| 898 | */ |
| 899 | static inline void ice_clear_rdma_cap(struct ice_pf *pf) |
| 900 | { |
Dave Ertman | f9f5301 | 2021-05-20 09:37:51 -0500 | [diff] [blame] | 901 | ice_unplug_aux_dev(pf); |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 902 | clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
Dave Ertman | bfe8443 | 2021-09-09 08:12:23 -0700 | [diff] [blame] | 903 | clear_bit(ICE_FLAG_AUX_ENA, pf->flags); |
Dave Ertman | d25a0fc | 2021-05-20 09:37:49 -0500 | [diff] [blame] | 904 | } |
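/*
 * Illustrative flow only (assumed, not taken from the driver sources):
 * once device capabilities have been read, a setup path could do
 *
 *	ice_set_rdma_cap(pf);
 *	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
 *		err = ice_init_rdma(pf);
 *
 * while teardown uses ice_clear_rdma_cap(), which unplugs the auxiliary
 * device before clearing the RDMA/AUX capability flags.
 */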
Anirudh Venkataramanan | 837f08f | 2018-03-20 07:58:05 -0700 | [diff] [blame] | 905 | #endif /* _ICE_H_ */ |