/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
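/* efx_enqueue_skb() dispatches to the per-NIC-type TX handler.  The
 * INDIRECT_CALL_2() wrapper compares ->tx_enqueue against the two known
 * handlers and calls the matching one directly, avoiding the cost of an
 * indirect (retpolined) call on the transmit hot path.
 */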
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
			       ef100_enqueue_skb, __efx_enqueue_skb,
			       tx_queue, skb);
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		INDIRECT_CALL_2(channel->efx->type->rx_packet,
				__ef100_rx_packet, __efx_rx_packet,
				channel);
}
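/* Ask the NIC type whether the RX prefix carries a valid hash.  Only
 * types that implement ->rx_buf_hash_valid (the EF100 handler named
 * below) can report an invalid hash; for all others the hash is
 * assumed valid.
 */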
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
	if (efx->type->rx_buf_hash_valid)
		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
				       ef100_rx_buf_hash_valid,
				       prefix);
	return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 *    is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 *    support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
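
/* Illustrative sketch of how a caller might use the hook above; the
 * efx_filter_init_rx() helper and the EFX_FILTER_PRI_MANUAL priority are
 * assumed to come from filter.h and are shown only as an example:
 *
 *	struct efx_filter_spec spec;
 *	s32 rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *	... set the match fields on &spec ...
 *	rc = efx_filter_insert_filter(efx, &spec, false);
 *	if (rc < 0)
 *		return rc;	(e.g. -EPERM or -EEXIST, as described above)
 *	filter_id = rc;
 */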

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to efx_filter_insert_filter()
 * @filter_id: ID of filter, as returned by efx_filter_insert_filter()
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to efx_filter_insert_filter()
 * @filter_id: ID of filter, as returned by efx_filter_insert_filter()
 * @spec: Buffer in which to store the filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   u32 filter_id, struct efx_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}

/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
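/* Assuming efx->vi_scale is the log2 of the virtual-interface allocation
 * per VF, each VF owns 2^vi_scale VIs.
 */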
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
	if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
		netif_device_attach(efx->net_dev);
}

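/* Best-effort check that @sem is held for write: if down_read_trylock()
 * succeeds the semaphore cannot currently be write-locked, so warn and
 * report failure; otherwise assume the caller holds it for write.
 */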
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}

int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush);

#endif /* EFX_EFX_H */