// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

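/* IBMVNIC_STAT_OFF() computes the byte offset of a firmware statistic
 * within struct ibmvnic_adapter and IBMVNIC_GET_STAT() reads the u64 at
 * that offset, letting the ethtool statistics handlers walk
 * ibmvnic_stats[] generically instead of naming each counter.
 */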
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			       offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

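/**
 * send_crq_init_complete() - Acknowledge completion of CRQ initialization
 * @adapter: ibmvnic adapter
 *
 * Send an IBMVNIC_CRQ_INIT_COMPLETE message to tell the VNIC server that
 * the driver side of the CRQ is ready.
 *
 * Return: the return value of ibmvnic_send_crq().
 */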
static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

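/**
 * send_version_xchg() - Propose the driver's VNIC protocol version
 * @adapter: ibmvnic adapter
 *
 * Send a VERSION_EXCHANGE CRQ carrying ibmvnic_version; the server's
 * response completes the version negotiation.
 *
 * Return: the return value of ibmvnic_send_crq().
 */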
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

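/**
 * h_reg_sub_crq() - Register a sub-CRQ with the hypervisor
 * @unit_address: unit address of the VNIC device
 * @token: mapped address of the queue to register
 * @length: length of the queue in bytes
 * @number: on success, filled with the new sub-CRQ's number
 * @irq: on success, filled with the interrupt source assigned to the queue
 *
 * Thin wrapper around the H_REG_SUB_CRQ hcall; the sub-CRQ number and its
 * interrupt source come back in the hcall's return buffer.
 *
 * Return: the hcall return code (H_SUCCESS on success).
 */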
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or under low memory situations, can cause resets to fail/timeout and
 * for the LPAR to lose connectivity. So hold onto the LTB even if we fail
 * to communicate with the VIOS and reuse it on next open. Free LTB when
 * adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 * a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

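/**
 * free_long_term_buff() - Unmap and free a long term buffer (LTB)
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 *
 * Ask the VIOS to unmap the buffer (unless the reset reason implies the
 * remote end has already done so), free the DMA memory and return the
 * map id to the free pool. Safe to call on an already-freed LTB.
 */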
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

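/* Mark every rx pool inactive so replenish_rx_pool() becomes a no-op
 * until the pools are reinitialized.
 */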
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

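/**
 * replenish_rx_pool() - Refill the free slots of an rx pool
 * @adapter: ibmvnic adapter
 * @pool: rx pool to replenish
 *
 * Attach an skb to each free slot, stage an rx_add descriptor for it and
 * flush the staged descriptors to the VIOS with send_subcrq_indirect()
 * whenever the indirect buffer fills up or the last buffer is queued.
 * On an hcall failure the staged buffers are unwound, and if the queue
 * was closed or a failover is pending the pools are deactivated and the
 * carrier is reported off.
 */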
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		index = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[index].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[index].data = dst;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

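/* Walk all active rx pools and replenish any free slots. */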
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

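/**
 * init_stats_token() - DMA-map the adapter's statistics buffer
 * @adapter: ibmvnic adapter
 *
 * Map &adapter->stats for DMA_FROM_DEVICE and save the resulting token,
 * which is handed to firmware when statistics are requested.
 *
 * Return: 0 on success, or the DMA mapping error code.
 */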
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}

/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					  rx_pool->size * rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pool() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

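/**
 * init_one_tx_pool() - Allocate the bookkeeping arrays for one tx pool
 * @netdev: net device associated with the vnic interface
 * @tx_pool: the pool being initialized
 * @pool_size: number of buffers in the pool
 * @buf_size: size of each buffer in the pool
 *
 * Allocate the tx_buff and free_map arrays and reset the pool's indexes.
 * The pool's long term buffer itself is allocated and mapped later, in
 * init_tx_pools().
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */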
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 816 | static int init_one_tx_pool(struct net_device *netdev, |
| 817 | struct ibmvnic_tx_pool *tx_pool, |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 818 | int pool_size, int buf_size) |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 819 | { |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 820 | int i; |
| 821 | |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 822 | tx_pool->tx_buff = kcalloc(pool_size, |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 823 | sizeof(struct ibmvnic_tx_buff), |
| 824 | GFP_KERNEL); |
| 825 | if (!tx_pool->tx_buff) |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 826 | return -ENOMEM; |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 827 | |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 828 | tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 829 | if (!tx_pool->free_map) { |
| 830 | kfree(tx_pool->tx_buff); |
| 831 | tx_pool->tx_buff = NULL; |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 832 | return -ENOMEM; |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 833 | } |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 834 | |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 835 | for (i = 0; i < pool_size; i++) |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 836 | tx_pool->free_map[i] = i; |
| 837 | |
| 838 | tx_pool->consumer_index = 0; |
| 839 | tx_pool->producer_index = 0; |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 840 | tx_pool->num_buffers = pool_size; |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 841 | tx_pool->buf_size = buf_size; |
| 842 | |
| 843 | return 0; |
| 844 | } |
| 845 | |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 846 | /** |
| 847 | * reuse_tx_pools() - Check if the existing tx pools can be reused. |
| 848 | * @adapter: ibmvnic adapter |
| 849 | * |
| 850 | * Check if the existing tx pools in the adapter can be reused. The |
| 851 | * pools can be reused if the pool parameters (number of pools, |
| 852 | * number of buffers in the pool and mtu) have not changed. |
| 853 | * |
| 854 | * NOTE: This assumes that all pools have the same number of buffers |
| 855 | * which is the case currently. If that changes, we must fix this. |
| 856 | * |
| 857 | * Return: true if the tx pools can be reused, false otherwise. |
| 858 | */ |
| 859 | static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) |
| 860 | { |
| 861 | u64 old_num_pools, new_num_pools; |
| 862 | u64 old_pool_size, new_pool_size; |
| 863 | u64 old_mtu, new_mtu; |
| 864 | |
| 865 | if (!adapter->tx_pool) |
| 866 | return false; |
| 867 | |
| 868 | old_num_pools = adapter->num_active_tx_pools; |
| 869 | new_num_pools = adapter->num_active_tx_scrqs; |
| 870 | old_pool_size = adapter->prev_tx_pool_size; |
| 871 | new_pool_size = adapter->req_tx_entries_per_subcrq; |
| 872 | old_mtu = adapter->prev_mtu; |
| 873 | new_mtu = adapter->req_mtu; |
| 874 | |
Sukadev Bhattiprolu | 5b08560 | 2021-11-30 21:48:36 -0800 | [diff] [blame] | 875 | if (old_mtu != new_mtu || |
| 876 | old_num_pools != new_num_pools || |
| 877 | old_pool_size != new_pool_size) |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 878 | return false; |
| 879 | |
| 880 | return true; |
| 881 | } |
| 882 | |
| 883 | /** |
| 884 | * init_tx_pools(): Initialize the set of transmit pools in the adapter. |
| 885 | * @netdev: net device associated with the vnic interface |
| 886 | * |
| 887 | * Initialize the set of transmit pools in the ibmvnic adapter associated |
| 888 | * with the net_device @netdev. If possible, reuse the existing tx pools. |
| 889 | * Otherwise free any existing pools and allocate a new set of pools |
| 890 | * before initializing them. |
| 891 | * |
| 892 | * Return: 0 on success and negative value on error. |
| 893 | */ |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 894 | static int init_tx_pools(struct net_device *netdev) |
| 895 | { |
| 896 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Sukadev Bhattiprolu | 0f2bf31 | 2021-09-14 20:52:52 -0700 | [diff] [blame] | 897 | struct device *dev = &adapter->vdev->dev; |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 898 | int num_pools; |
| 899 | u64 pool_size; /* # of buffers in pool */ |
Dwip N. Banerjee | 9a87c3f | 2020-11-18 19:12:22 -0600 | [diff] [blame] | 900 | u64 buff_size; |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 901 | int i, j, rc; |
| 902 | |
| 903 | num_pools = adapter->req_tx_queues; |
| 904 | |
| 905 | /* We must notify the VIOS about the LTB on all resets - but we only |
| 906 | * need to alloc/populate pools if either the number of buffers or |
| 907 | * size of each buffer in the pool has changed. |
| 908 | */ |
| 909 | if (reuse_tx_pools(adapter)) { |
| 910 | netdev_dbg(netdev, "Reusing tx pools\n"); |
| 911 | goto update_ltb; |
| 912 | } |
| 913 | |
| 914 | /* Allocate/populate the pools. */ |
| 915 | release_tx_pools(adapter); |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 916 | |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 917 | pool_size = adapter->req_tx_entries_per_subcrq; |
| 918 | num_pools = adapter->num_active_tx_scrqs; |
| 919 | |
| 920 | adapter->tx_pool = kcalloc(num_pools, |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 921 | sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); |
| 922 | if (!adapter->tx_pool) |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 923 | return -ENOMEM; |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 924 | |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 925 | adapter->tso_pool = kcalloc(num_pools, |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 926 | sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); |
Sukadev Bhattiprolu | 0f2bf31 | 2021-09-14 20:52:52 -0700 | [diff] [blame] | 927 | /* To simplify release_tx_pools() ensure that ->tx_pool and |
| 928 | * ->tso_pool are either both NULL or both non-NULL. |
| 929 | */ |
Sukadev Bhattiprolu | f6ebca8 | 2021-06-23 21:13:15 -0700 | [diff] [blame] | 930 | if (!adapter->tso_pool) { |
| 931 | kfree(adapter->tx_pool); |
| 932 | adapter->tx_pool = NULL; |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 933 | return -ENOMEM; |
Sukadev Bhattiprolu | f6ebca8 | 2021-06-23 21:13:15 -0700 | [diff] [blame] | 934 | } |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 935 | |
Sukadev Bhattiprolu | 0f2bf31 | 2021-09-14 20:52:52 -0700 | [diff] [blame] | 936 | /* Set num_active_tx_pools early. If we fail below after partial |
| 937 | * allocation, release_tx_pools() will know how many to look for. |
| 938 | */ |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 939 | adapter->num_active_tx_pools = num_pools; |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 940 | |
Sukadev Bhattiprolu | 0d1af4f | 2021-09-14 20:52:55 -0700 | [diff] [blame] | 941 | buff_size = adapter->req_mtu + VLAN_HLEN; |
| 942 | buff_size = ALIGN(buff_size, L1_CACHE_BYTES); |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 943 | |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 944 | for (i = 0; i < num_pools; i++) { |
Sukadev Bhattiprolu | 0f2bf31 | 2021-09-14 20:52:52 -0700 | [diff] [blame] | 945 | dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", |
| 946 | i, adapter->req_tx_entries_per_subcrq, buff_size); |
| 947 | |
Thomas Falcon | 3205306 | 2018-03-16 20:00:27 -0500 | [diff] [blame] | 948 | rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], |
Sukadev Bhattiprolu | 8243c7e | 2021-09-14 20:52:54 -0700 | [diff] [blame] | 949 | pool_size, buff_size); |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 950 | if (rc) |
| 951 | goto out_release; |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 952 | |
Thomas Falcon | 7c940b1 | 2019-06-07 16:03:55 -0500 | [diff] [blame] | 953 | rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], |
| 954 | IBMVNIC_TSO_BUFS, |
| 955 | IBMVNIC_TSO_BUF_SZ); |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 956 | if (rc) |
| 957 | goto out_release; |
| 958 | } |
| 959 | |
| 960 | adapter->prev_tx_pool_size = pool_size; |
| 961 | adapter->prev_mtu = adapter->req_mtu; |
| 962 | |
| 963 | update_ltb: |
| 964 | /* NOTE: All tx_pools have the same number of buffers (which is
| 965 | * the same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
| 966 | * buffers (see the calls to init_one_tx_pool() above).
| 967 | * For consistency, we use tx_pool->num_buffers and |
| 968 | * tso_pool->num_buffers below. |
| 969 | */ |
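| | /* Example (illustrative sizes): 128 buffers of 1536 bytes give a
| | * 192 KiB tx LTB per queue below, while each tso LTB is
| | * IBMVNIC_TSO_BUFS * IBMVNIC_TSO_BUF_SZ (64 * 64 KiB = 4 MiB with
| | * the values in ibmvnic.h at the time of writing).
| | */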
| 970 | rc = -1; |
| 971 | for (i = 0; i < num_pools; i++) { |
| 972 | struct ibmvnic_tx_pool *tso_pool; |
| 973 | struct ibmvnic_tx_pool *tx_pool; |
| 974 | u32 ltb_size; |
| 975 | |
| 976 | tx_pool = &adapter->tx_pool[i]; |
| 977 | ltb_size = tx_pool->num_buffers * tx_pool->buf_size; |
| 978 | if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, |
| 979 | ltb_size)) |
| 980 | goto out; |
| 981 | |
| 982 | dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", |
| 983 | i, tx_pool->long_term_buff.buff, |
| 984 | tx_pool->num_buffers, tx_pool->buf_size); |
| 985 | |
| 986 | tx_pool->consumer_index = 0; |
| 987 | tx_pool->producer_index = 0; |
| 988 | |
| 989 | for (j = 0; j < tx_pool->num_buffers; j++) |
| 990 | tx_pool->free_map[j] = j; |
| 991 | |
| 992 | tso_pool = &adapter->tso_pool[i]; |
| 993 | ltb_size = tso_pool->num_buffers * tso_pool->buf_size; |
| 994 | if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, |
| 995 | ltb_size)) |
| 996 | goto out; |
| 997 | |
| 998 | dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", |
| 999 | i, tso_pool->long_term_buff.buff, |
| 1000 | tso_pool->num_buffers, tso_pool->buf_size); |
| 1001 | |
| 1002 | tso_pool->consumer_index = 0; |
| 1003 | tso_pool->producer_index = 0; |
| 1004 | |
| 1005 | for (j = 0; j < tso_pool->num_buffers; j++) |
| 1006 | tso_pool->free_map[j] = j; |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 1007 | } |
| 1008 | |
| 1009 | return 0; |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 1010 | out_release: |
| 1011 | release_tx_pools(adapter); |
| 1012 | out: |
| 1013 | /* We failed to allocate one or more LTBs or map them on the VIOS. |
| 1014 | * Hold onto the pools and any LTBs that we did allocate/map. |
| 1015 | */ |
| 1016 | return rc; |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 1017 | } |
| 1018 | |
John Allen | d944c3d6 | 2017-05-26 10:30:13 -0400 | [diff] [blame] | 1019 | static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) |
| 1020 | { |
| 1021 | int i; |
| 1022 | |
| 1023 | if (adapter->napi_enabled) |
| 1024 | return; |
| 1025 | |
| 1026 | for (i = 0; i < adapter->req_rx_queues; i++) |
| 1027 | napi_enable(&adapter->napi[i]); |
| 1028 | |
| 1029 | adapter->napi_enabled = true; |
| 1030 | } |
| 1031 | |
| 1032 | static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) |
| 1033 | { |
| 1034 | int i; |
| 1035 | |
| 1036 | if (!adapter->napi_enabled) |
| 1037 | return; |
| 1038 | |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1039 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 1040 | netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); |
John Allen | d944c3d6 | 2017-05-26 10:30:13 -0400 | [diff] [blame] | 1041 | napi_disable(&adapter->napi[i]); |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1042 | } |
John Allen | d944c3d6 | 2017-05-26 10:30:13 -0400 | [diff] [blame] | 1043 | |
| 1044 | adapter->napi_enabled = false; |
| 1045 | } |
| 1046 | |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1047 | static int init_napi(struct ibmvnic_adapter *adapter) |
| 1048 | { |
| 1049 | int i; |
| 1050 | |
| 1051 | adapter->napi = kcalloc(adapter->req_rx_queues, |
| 1052 | sizeof(struct napi_struct), GFP_KERNEL); |
| 1053 | if (!adapter->napi) |
| 1054 | return -ENOMEM; |
| 1055 | |
| 1056 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 1057 | netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); |
| 1058 | netif_napi_add(adapter->netdev, &adapter->napi[i], |
| 1059 | ibmvnic_poll, NAPI_POLL_WEIGHT); |
| 1060 | } |
| 1061 | |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 1062 | adapter->num_active_rx_napi = adapter->req_rx_queues; |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1063 | return 0; |
| 1064 | } |
| 1065 | |
| 1066 | static void release_napi(struct ibmvnic_adapter *adapter) |
| 1067 | { |
| 1068 | int i; |
| 1069 | |
| 1070 | if (!adapter->napi) |
| 1071 | return; |
| 1072 | |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 1073 | for (i = 0; i < adapter->num_active_rx_napi; i++) { |
Wen Yang | 390de19 | 2018-12-11 12:20:46 +0800 | [diff] [blame] | 1074 | netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); |
| 1075 | netif_napi_del(&adapter->napi[i]); |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1076 | } |
| 1077 | |
| 1078 | kfree(adapter->napi); |
| 1079 | adapter->napi = NULL; |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 1080 | adapter->num_active_rx_napi = 0; |
Thomas Falcon | c3f2241 | 2018-05-23 13:37:55 -0500 | [diff] [blame] | 1081 | adapter->napi_enabled = false; |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1082 | } |
| 1083 | |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 1084 | static const char *adapter_state_to_string(enum vnic_state state) |
| 1085 | { |
| 1086 | switch (state) { |
| 1087 | case VNIC_PROBING: |
| 1088 | return "PROBING"; |
| 1089 | case VNIC_PROBED: |
| 1090 | return "PROBED"; |
| 1091 | case VNIC_OPENING: |
| 1092 | return "OPENING"; |
| 1093 | case VNIC_OPEN: |
| 1094 | return "OPEN"; |
| 1095 | case VNIC_CLOSING: |
| 1096 | return "CLOSING"; |
| 1097 | case VNIC_CLOSED: |
| 1098 | return "CLOSED"; |
| 1099 | case VNIC_REMOVING: |
| 1100 | return "REMOVING"; |
| 1101 | case VNIC_REMOVED: |
| 1102 | return "REMOVED"; |
Lijun Pan | 822ebc2 | 2021-06-11 10:35:37 -0500 | [diff] [blame] | 1103 | case VNIC_DOWN: |
| 1104 | return "DOWN"; |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 1105 | } |
Michal Suchanek | 07b5dc1 | 2021-05-20 08:50:34 +0200 | [diff] [blame] | 1106 | return "UNKNOWN"; |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 1107 | } |
| 1108 | |
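| | /* Log in to the VNIC server. The login is retried a bounded number of
| | * times if it times out, is aborted, or returns PARTIALSUCCESS; partial
| | * success means the server could not honor the requested capabilities,
| | * so the sub-CRQs are released and renegotiated before resending.
| | */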
John Allen | a57a5d2 | 2017-03-17 17:13:41 -0500 | [diff] [blame] | 1109 | static int ibmvnic_login(struct net_device *netdev) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1110 | { |
| 1111 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Dany Madden | 98c41f0 | 2020-11-25 18:04:32 -0600 | [diff] [blame] | 1112 | unsigned long timeout = msecs_to_jiffies(20000); |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1113 | int retry_count = 0; |
Thomas Falcon | dff515a3 | 2020-06-15 10:29:23 -0500 | [diff] [blame] | 1114 | int retries = 10; |
Thomas Falcon | eb11041 | 2018-05-24 14:37:53 -0500 | [diff] [blame] | 1115 | bool retry; |
Thomas Falcon | 4d96f12 | 2017-08-01 15:04:36 -0500 | [diff] [blame] | 1116 | int rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1117 | |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1118 | do { |
Thomas Falcon | eb11041 | 2018-05-24 14:37:53 -0500 | [diff] [blame] | 1119 | retry = false; |
Thomas Falcon | dff515a3 | 2020-06-15 10:29:23 -0500 | [diff] [blame] | 1120 | if (retry_count > retries) { |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1121 | netdev_warn(netdev, "Login attempts exceeded\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 1122 | return -EACCES; |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1123 | } |
| 1124 | |
| 1125 | adapter->init_done_rc = 0; |
| 1126 | reinit_completion(&adapter->init_done); |
| 1127 | rc = send_login(adapter); |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 1128 | if (rc) |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1129 | return rc; |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1130 | |
| 1131 | if (!wait_for_completion_timeout(&adapter->init_done, |
| 1132 | timeout)) { |
Thomas Falcon | dff515a3 | 2020-06-15 10:29:23 -0500 | [diff] [blame] | 1133 | netdev_warn(netdev, "Login timed out, retrying...\n"); |
| 1134 | retry = true; |
| 1135 | adapter->init_done_rc = 0; |
| 1136 | retry_count++; |
| 1137 | continue; |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1138 | } |
| 1139 | |
Thomas Falcon | dff515a3 | 2020-06-15 10:29:23 -0500 | [diff] [blame] | 1140 | if (adapter->init_done_rc == ABORTED) { |
| 1141 | netdev_warn(netdev, "Login aborted, retrying...\n"); |
| 1142 | retry = true; |
| 1143 | adapter->init_done_rc = 0; |
| 1144 | retry_count++; |
| 1145 | /* FW or device may be busy, so |
| 1146 | * wait a bit before retrying login |
| 1147 | */ |
| 1148 | msleep(500); |
| 1149 | } else if (adapter->init_done_rc == PARTIALSUCCESS) { |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1150 | retry_count++; |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 1151 | release_sub_crqs(adapter, 1); |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1152 | |
Thomas Falcon | eb11041 | 2018-05-24 14:37:53 -0500 | [diff] [blame] | 1153 | retry = true; |
| 1154 | netdev_dbg(netdev, |
| 1155 | "Received partial success, retrying...\n"); |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1156 | adapter->init_done_rc = 0; |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1157 | reinit_completion(&adapter->init_done); |
Lijun Pan | 491099a | 2020-09-27 20:13:26 -0500 | [diff] [blame] | 1158 | send_query_cap(adapter); |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1159 | if (!wait_for_completion_timeout(&adapter->init_done, |
| 1160 | timeout)) { |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1161 | netdev_warn(netdev, |
| 1162 | "Capabilities query timed out\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 1163 | return -ETIMEDOUT; |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1164 | } |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1165 | |
Thomas Falcon | 4d96f12 | 2017-08-01 15:04:36 -0500 | [diff] [blame] | 1166 | rc = init_sub_crqs(adapter); |
| 1167 | if (rc) { |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1168 | netdev_warn(netdev, |
| 1169 | "SCRQ initialization failed\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 1170 | return rc; |
Thomas Falcon | 4d96f12 | 2017-08-01 15:04:36 -0500 | [diff] [blame] | 1171 | } |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1172 | |
Thomas Falcon | 4d96f12 | 2017-08-01 15:04:36 -0500 | [diff] [blame] | 1173 | rc = init_sub_crq_irqs(adapter); |
| 1174 | if (rc) { |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1175 | netdev_warn(netdev, |
| 1176 | "SCRQ irq initialization failed\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 1177 | return rc; |
Thomas Falcon | 4d96f12 | 2017-08-01 15:04:36 -0500 | [diff] [blame] | 1178 | } |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 1179 | } else if (adapter->init_done_rc) { |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 1180 | netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n", |
| 1181 | adapter->init_done_rc); |
| 1182 | return -EIO; |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1183 | } |
Thomas Falcon | eb11041 | 2018-05-24 14:37:53 -0500 | [diff] [blame] | 1184 | } while (retry); |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1185 | |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 1186 | __ibmvnic_set_mac(netdev, adapter->mac_addr); |
Thomas Falcon | 3d16613 | 2018-01-10 19:39:52 -0600 | [diff] [blame] | 1187 | |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 1188 | netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); |
John Allen | a57a5d2 | 2017-03-17 17:13:41 -0500 | [diff] [blame] | 1189 | return 0; |
| 1190 | } |
| 1191 | |
Thomas Falcon | 34f0f4e | 2018-02-13 18:23:40 -0600 | [diff] [blame] | 1192 | static void release_login_buffer(struct ibmvnic_adapter *adapter) |
| 1193 | { |
| 1194 | kfree(adapter->login_buf); |
| 1195 | adapter->login_buf = NULL; |
| 1196 | } |
| 1197 | |
| 1198 | static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) |
| 1199 | { |
| 1200 | kfree(adapter->login_rsp_buf); |
| 1201 | adapter->login_rsp_buf = NULL; |
| 1202 | } |
| 1203 | |
Nathan Fontenot | 1b8955e | 2017-03-30 02:49:29 -0400 | [diff] [blame] | 1204 | static void release_resources(struct ibmvnic_adapter *adapter) |
| 1205 | { |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1206 | release_vpd_data(adapter); |
| 1207 | |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1208 | release_napi(adapter); |
Lijun Pan | a0c8be5 | 2020-12-19 15:39:19 -0600 | [diff] [blame] | 1209 | release_login_buffer(adapter); |
Thomas Falcon | 34f0f4e | 2018-02-13 18:23:40 -0600 | [diff] [blame] | 1210 | release_login_rsp_buffer(adapter); |
Nathan Fontenot | 1b8955e | 2017-03-30 02:49:29 -0400 | [diff] [blame] | 1211 | } |
| 1212 | |
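| | /* Request a logical link state change via a LOGICAL_LINK_STATE CRQ and
| | * wait for the server to complete it; a PARTIALSUCCESS response is
| | * retried after a one-second delay.
| | */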
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1213 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) |
| 1214 | { |
| 1215 | struct net_device *netdev = adapter->netdev; |
Dany Madden | 98c41f0 | 2020-11-25 18:04:32 -0600 | [diff] [blame] | 1216 | unsigned long timeout = msecs_to_jiffies(20000); |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1217 | union ibmvnic_crq crq; |
| 1218 | bool resend; |
| 1219 | int rc; |
| 1220 | |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1221 | netdev_dbg(netdev, "setting link state %d\n", link_state); |
| 1222 | |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1223 | memset(&crq, 0, sizeof(crq)); |
| 1224 | crq.logical_link_state.first = IBMVNIC_CRQ_CMD; |
| 1225 | crq.logical_link_state.cmd = LOGICAL_LINK_STATE; |
| 1226 | crq.logical_link_state.link_state = link_state; |
| 1227 | |
| 1228 | do { |
| 1229 | resend = false; |
| 1230 | |
| 1231 | reinit_completion(&adapter->init_done); |
| 1232 | rc = ibmvnic_send_crq(adapter, &crq); |
| 1233 | if (rc) { |
| 1234 | netdev_err(netdev, "Failed to set link state\n"); |
| 1235 | return rc; |
| 1236 | } |
| 1237 | |
| 1238 | if (!wait_for_completion_timeout(&adapter->init_done, |
| 1239 | timeout)) { |
| 1240 | netdev_err(netdev, "timeout setting link state\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 1241 | return -ETIMEDOUT; |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1242 | } |
| 1243 | |
Lijun Pan | 4c5f6af | 2020-08-19 17:52:23 -0500 | [diff] [blame] | 1244 | if (adapter->init_done_rc == PARTIALSUCCESS) { |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1245 | /* Partial success; delay and re-send */
| 1246 | mdelay(1000); |
| 1247 | resend = true; |
Thomas Falcon | ab5ec33 | 2018-05-23 13:37:59 -0500 | [diff] [blame] | 1248 | } else if (adapter->init_done_rc) { |
| 1249 | netdev_warn(netdev, "Unable to set link state, rc=%d\n", |
| 1250 | adapter->init_done_rc); |
| 1251 | return adapter->init_done_rc; |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1252 | } |
| 1253 | } while (resend); |
| 1254 | |
| 1255 | return 0; |
| 1256 | } |
| 1257 | |
Thomas Falcon | 7f3c6e6 | 2017-04-21 15:38:40 -0400 | [diff] [blame] | 1258 | static int set_real_num_queues(struct net_device *netdev) |
| 1259 | { |
| 1260 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 1261 | int rc; |
| 1262 | |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1263 | netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n", |
| 1264 | adapter->req_tx_queues, adapter->req_rx_queues); |
| 1265 | |
Thomas Falcon | 7f3c6e6 | 2017-04-21 15:38:40 -0400 | [diff] [blame] | 1266 | rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); |
| 1267 | if (rc) { |
| 1268 | netdev_err(netdev, "failed to set the number of tx queues\n"); |
| 1269 | return rc; |
| 1270 | } |
| 1271 | |
| 1272 | rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues); |
| 1273 | if (rc) |
| 1274 | netdev_err(netdev, "failed to set the number of rx queues\n"); |
| 1275 | |
| 1276 | return rc; |
| 1277 | } |
| 1278 | |
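| | /* Retrieve Vital Product Data from the server in two steps: a
| | * GET_VPD_SIZE CRQ to learn the required length, then a GET_VPD CRQ
| | * against a DMA-mapped buffer of that size. fw_lock serializes use of
| | * the shared fw_done completion.
| | */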
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1279 | static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) |
| 1280 | { |
| 1281 | struct device *dev = &adapter->vdev->dev; |
| 1282 | union ibmvnic_crq crq; |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1283 | int len = 0; |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 1284 | int rc; |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1285 | |
| 1286 | if (adapter->vpd->buff) |
| 1287 | len = adapter->vpd->len; |
| 1288 | |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1289 | mutex_lock(&adapter->fw_lock); |
| 1290 | adapter->fw_done_rc = 0; |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 1291 | reinit_completion(&adapter->fw_done); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1292 | |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1293 | crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; |
| 1294 | crq.get_vpd_size.cmd = GET_VPD_SIZE; |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 1295 | rc = ibmvnic_send_crq(adapter, &crq); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1296 | if (rc) { |
| 1297 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 1298 | return rc; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1299 | } |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 1300 | |
| 1301 | rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
| 1302 | if (rc) { |
| 1303 | dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1304 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 1305 | return rc; |
| 1306 | } |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1307 | mutex_unlock(&adapter->fw_lock); |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1308 | |
| 1309 | if (!adapter->vpd->len) |
| 1310 | return -ENODATA; |
| 1311 | |
| 1312 | if (!adapter->vpd->buff) |
| 1313 | adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); |
| 1314 | else if (adapter->vpd->len != len) |
| 1315 | adapter->vpd->buff = |
| 1316 | krealloc(adapter->vpd->buff, |
| 1317 | adapter->vpd->len, GFP_KERNEL); |
| 1318 | |
| 1319 | if (!adapter->vpd->buff) { |
| 1320 | dev_err(dev, "Could not allocate VPD buffer\n");
| 1321 | return -ENOMEM; |
| 1322 | } |
| 1323 | |
| 1324 | adapter->vpd->dma_addr = |
| 1325 | dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, |
| 1326 | DMA_FROM_DEVICE); |
Desnes Augusto Nunes do Rosario | f743106 | 2017-11-17 09:09:04 -0200 | [diff] [blame] | 1327 | if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1328 | dev_err(dev, "Could not map VPD buffer\n"); |
| 1329 | kfree(adapter->vpd->buff); |
Thomas Falcon | b0992ec | 2018-02-06 17:25:23 -0600 | [diff] [blame] | 1330 | adapter->vpd->buff = NULL; |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1331 | return -ENOMEM; |
| 1332 | } |
| 1333 | |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1334 | mutex_lock(&adapter->fw_lock); |
| 1335 | adapter->fw_done_rc = 0; |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1336 | reinit_completion(&adapter->fw_done); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1337 | |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1338 | crq.get_vpd.first = IBMVNIC_CRQ_CMD; |
| 1339 | crq.get_vpd.cmd = GET_VPD; |
| 1340 | crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); |
| 1341 | crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 1342 | rc = ibmvnic_send_crq(adapter, &crq); |
| 1343 | if (rc) { |
| 1344 | kfree(adapter->vpd->buff); |
| 1345 | adapter->vpd->buff = NULL; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1346 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 1347 | return rc; |
| 1348 | } |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 1349 | |
| 1350 | rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
| 1351 | if (rc) { |
| 1352 | dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc); |
| 1353 | kfree(adapter->vpd->buff); |
| 1354 | adapter->vpd->buff = NULL; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1355 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 1356 | return rc; |
| 1357 | } |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1358 | |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 1359 | mutex_unlock(&adapter->fw_lock); |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1360 | return 0; |
| 1361 | } |
| 1362 | |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1363 | static int init_resources(struct ibmvnic_adapter *adapter) |
John Allen | a57a5d2 | 2017-03-17 17:13:41 -0500 | [diff] [blame] | 1364 | { |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1365 | struct net_device *netdev = adapter->netdev; |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1366 | int rc; |
John Allen | a57a5d2 | 2017-03-17 17:13:41 -0500 | [diff] [blame] | 1367 | |
Thomas Falcon | 7f3c6e6 | 2017-04-21 15:38:40 -0400 | [diff] [blame] | 1368 | rc = set_real_num_queues(netdev); |
| 1369 | if (rc) |
| 1370 | return rc; |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 1371 | |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1372 | adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); |
| 1373 | if (!adapter->vpd) |
| 1374 | return -ENOMEM; |
| 1375 | |
John Allen | 69d08dc | 2018-01-18 16:27:58 -0600 | [diff] [blame] | 1376 | /* Vital Product Data (VPD) */ |
| 1377 | rc = ibmvnic_get_vpd(adapter); |
| 1378 | if (rc) { |
| 1379 | netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); |
| 1380 | return rc; |
| 1381 | } |
| 1382 | |
Nathan Fontenot | 86f669b | 2018-02-19 13:30:39 -0600 | [diff] [blame] | 1383 | rc = init_napi(adapter); |
| 1384 | if (rc) |
| 1385 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1386 | |
Lijun Pan | 69980d0 | 2020-09-27 20:13:28 -0500 | [diff] [blame] | 1387 | send_query_map(adapter); |
Nathan Fontenot | 0ffe2cb | 2017-03-30 02:49:12 -0400 | [diff] [blame] | 1388 | |
| 1389 | rc = init_rx_pools(netdev); |
| 1390 | if (rc) |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1391 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1392 | |
Nathan Fontenot | c657e32 | 2017-03-30 02:49:06 -0400 | [diff] [blame] | 1393 | rc = init_tx_pools(netdev); |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1394 | return rc; |
| 1395 | } |
| 1396 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1397 | static int __ibmvnic_open(struct net_device *netdev) |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1398 | { |
| 1399 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1400 | enum vnic_state prev_state = adapter->state; |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1401 | int i, rc; |
| 1402 | |
Nathan Fontenot | 90c8014 | 2017-05-03 14:04:32 -0400 | [diff] [blame] | 1403 | adapter->state = VNIC_OPENING; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1404 | replenish_pools(adapter); |
John Allen | d944c3d6 | 2017-05-26 10:30:13 -0400 | [diff] [blame] | 1405 | ibmvnic_napi_enable(adapter); |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1406 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1407 | /* We're ready to receive frames; enable the sub-crq interrupts and
| 1408 | * set the logical link state to up |
| 1409 | */ |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1410 | for (i = 0; i < adapter->req_rx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1411 | netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1412 | if (prev_state == VNIC_CLOSED) |
| 1413 | enable_irq(adapter->rx_scrq[i]->irq); |
Thomas Falcon | f23e064 | 2018-04-15 18:53:36 -0500 | [diff] [blame] | 1414 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1415 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1416 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1417 | for (i = 0; i < adapter->req_tx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1418 | netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1419 | if (prev_state == VNIC_CLOSED) |
| 1420 | enable_irq(adapter->tx_scrq[i]->irq); |
Thomas Falcon | f23e064 | 2018-04-15 18:53:36 -0500 | [diff] [blame] | 1421 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1422 | netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i)); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1423 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1424 | |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 1425 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1426 | if (rc) { |
Lijun Pan | 0775ebc | 2021-04-14 02:46:14 -0500 | [diff] [blame] | 1427 | ibmvnic_napi_disable(adapter); |
Sukadev Bhattiprolu | 61772b0 | 2022-02-07 16:19:18 -0800 | [diff] [blame] | 1428 | ibmvnic_disable_irqs(adapter); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1429 | return rc; |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1430 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1431 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1432 | netif_tx_start_all_queues(netdev); |
| 1433 | |
Dany Madden | 2ca220f | 2021-06-23 21:13:11 -0700 | [diff] [blame] | 1434 | if (prev_state == VNIC_CLOSED) { |
| 1435 | for (i = 0; i < adapter->req_rx_queues; i++) |
| 1436 | napi_schedule(&adapter->napi[i]); |
| 1437 | } |
| 1438 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1439 | adapter->state = VNIC_OPEN; |
| 1440 | return rc; |
| 1441 | } |
| 1442 | |
| 1443 | static int ibmvnic_open(struct net_device *netdev) |
| 1444 | { |
| 1445 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
John Allen | 69d08dc | 2018-01-18 16:27:58 -0600 | [diff] [blame] | 1446 | int rc; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1447 | |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 1448 | ASSERT_RTNL(); |
| 1449 | |
| 1450 | /* If device failover is pending or we are about to reset, just set |
| 1451 | * device state and return. Device operation will be handled by the
| 1452 | * reset routine.
| 1453 | * |
| 1454 | * It should be safe to overwrite the adapter->state here. Since |
| 1455 | * we hold the rtnl, either the reset has not actually started or |
| 1456 | * the rtnl got dropped during the set_link_state() in do_reset(). |
| 1457 | * In the former case, no one else is changing the state (again we |
| 1458 | * have the rtnl) and in the latter case, do_reset() will detect and |
| 1459 | * honor our setting below. |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 1460 | */ |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 1461 | if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 1462 | netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n", |
| 1463 | adapter_state_to_string(adapter->state), |
| 1464 | adapter->failover_pending); |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 1465 | adapter->state = VNIC_OPEN; |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 1466 | rc = 0; |
| 1467 | goto out; |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 1468 | } |
| 1469 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1470 | if (adapter->state != VNIC_CLOSED) { |
| 1471 | rc = ibmvnic_login(netdev); |
Juliet Kim | a5681e2 | 2018-11-19 15:59:22 -0600 | [diff] [blame] | 1472 | if (rc) |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 1473 | goto out; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1474 | |
| 1475 | rc = init_resources(adapter); |
| 1476 | if (rc) { |
| 1477 | netdev_err(netdev, "failed to initialize resources\n"); |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 1478 | goto out; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1479 | } |
| 1480 | } |
| 1481 | |
| 1482 | rc = __ibmvnic_open(netdev); |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 1483 | |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 1484 | out: |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 1485 | /* If open failed and there is a pending failover or an in-progress
| 1486 | * reset, set device state and return. Device operation will be
| 1487 | * handled by the reset routine. See also the comments above regarding rtnl.
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 1488 | */ |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 1489 | if (rc && |
| 1490 | (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 1491 | adapter->state = VNIC_OPEN; |
| 1492 | rc = 0; |
| 1493 | } |
Sukadev Bhattiprolu | 61772b0 | 2022-02-07 16:19:18 -0800 | [diff] [blame] | 1494 | |
| 1495 | if (rc) { |
| 1496 | release_resources(adapter); |
| 1497 | release_rx_pools(adapter); |
| 1498 | release_tx_pools(adapter); |
| 1499 | } |
| 1500 | |
Nathan Fontenot | bfc32f2 | 2017-05-03 14:04:26 -0400 | [diff] [blame] | 1501 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1502 | } |
| 1503 | |
Thomas Falcon | d0869c0 | 2018-02-13 18:23:43 -0600 | [diff] [blame] | 1504 | static void clean_rx_pools(struct ibmvnic_adapter *adapter) |
| 1505 | { |
| 1506 | struct ibmvnic_rx_pool *rx_pool; |
Thomas Falcon | 637f81d | 2018-02-26 18:10:57 -0600 | [diff] [blame] | 1507 | struct ibmvnic_rx_buff *rx_buff; |
Thomas Falcon | d0869c0 | 2018-02-13 18:23:43 -0600 | [diff] [blame] | 1508 | u64 rx_entries; |
| 1509 | int rx_scrqs; |
| 1510 | int i, j; |
| 1511 | |
| 1512 | if (!adapter->rx_pool) |
| 1513 | return; |
| 1514 | |
Thomas Falcon | 660e309 | 2018-04-20 14:25:32 -0500 | [diff] [blame] | 1515 | rx_scrqs = adapter->num_active_rx_pools; |
Thomas Falcon | d0869c0 | 2018-02-13 18:23:43 -0600 | [diff] [blame] | 1516 | rx_entries = adapter->req_rx_add_entries_per_subcrq; |
| 1517 | |
| 1518 | /* Free any remaining skbs in the rx buffer pools */ |
| 1519 | for (i = 0; i < rx_scrqs; i++) { |
| 1520 | rx_pool = &adapter->rx_pool[i]; |
Thomas Falcon | 637f81d | 2018-02-26 18:10:57 -0600 | [diff] [blame] | 1521 | if (!rx_pool || !rx_pool->rx_buff) |
Thomas Falcon | d0869c0 | 2018-02-13 18:23:43 -0600 | [diff] [blame] | 1522 | continue; |
| 1523 | |
| 1524 | netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); |
| 1525 | for (j = 0; j < rx_entries; j++) { |
Thomas Falcon | 637f81d | 2018-02-26 18:10:57 -0600 | [diff] [blame] | 1526 | rx_buff = &rx_pool->rx_buff[j]; |
| 1527 | if (rx_buff && rx_buff->skb) { |
| 1528 | dev_kfree_skb_any(rx_buff->skb); |
| 1529 | rx_buff->skb = NULL; |
Thomas Falcon | d0869c0 | 2018-02-13 18:23:43 -0600 | [diff] [blame] | 1530 | } |
| 1531 | } |
| 1532 | } |
| 1533 | } |
| 1534 | |
Thomas Falcon | e9e1e97 | 2018-03-16 20:00:30 -0500 | [diff] [blame] | 1535 | static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, |
| 1536 | struct ibmvnic_tx_pool *tx_pool) |
Nathan Fontenot | b41b83e | 2017-05-03 14:04:56 -0400 | [diff] [blame] | 1537 | { |
Thomas Falcon | 637f81d | 2018-02-26 18:10:57 -0600 | [diff] [blame] | 1538 | struct ibmvnic_tx_buff *tx_buff; |
Nathan Fontenot | b41b83e | 2017-05-03 14:04:56 -0400 | [diff] [blame] | 1539 | u64 tx_entries; |
Thomas Falcon | e9e1e97 | 2018-03-16 20:00:30 -0500 | [diff] [blame] | 1540 | int i; |
Nathan Fontenot | b41b83e | 2017-05-03 14:04:56 -0400 | [diff] [blame] | 1541 | |
Dan Carpenter | 050e85c | 2018-03-23 14:36:15 +0300 | [diff] [blame] | 1542 | if (!tx_pool || !tx_pool->tx_buff) |
Thomas Falcon | e9e1e97 | 2018-03-16 20:00:30 -0500 | [diff] [blame] | 1543 | return; |
| 1544 | |
| 1545 | tx_entries = tx_pool->num_buffers; |
| 1546 | |
| 1547 | for (i = 0; i < tx_entries; i++) { |
| 1548 | tx_buff = &tx_pool->tx_buff[i]; |
| 1549 | if (tx_buff && tx_buff->skb) { |
| 1550 | dev_kfree_skb_any(tx_buff->skb); |
| 1551 | tx_buff->skb = NULL; |
| 1552 | } |
| 1553 | } |
| 1554 | } |
| 1555 | |
| 1556 | static void clean_tx_pools(struct ibmvnic_adapter *adapter) |
| 1557 | { |
| 1558 | int tx_scrqs; |
| 1559 | int i; |
| 1560 | |
| 1561 | if (!adapter->tx_pool || !adapter->tso_pool) |
Nathan Fontenot | b41b83e | 2017-05-03 14:04:56 -0400 | [diff] [blame] | 1562 | return; |
| 1563 | |
Thomas Falcon | 660e309 | 2018-04-20 14:25:32 -0500 | [diff] [blame] | 1564 | tx_scrqs = adapter->num_active_tx_pools; |
Nathan Fontenot | b41b83e | 2017-05-03 14:04:56 -0400 | [diff] [blame] | 1565 | |
| 1566 | /* Free any remaining skbs in the tx buffer pools */ |
| 1567 | for (i = 0; i < tx_scrqs; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1568 | netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); |
Thomas Falcon | e9e1e97 | 2018-03-16 20:00:30 -0500 | [diff] [blame] | 1569 | clean_one_tx_pool(adapter, &adapter->tx_pool[i]); |
| 1570 | clean_one_tx_pool(adapter, &adapter->tso_pool[i]); |
Nathan Fontenot | b41b83e | 2017-05-03 14:04:56 -0400 | [diff] [blame] | 1571 | } |
| 1572 | } |
| 1573 | |
John Allen | 6095e59 | 2018-03-30 13:44:21 -0500 | [diff] [blame] | 1574 | static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) |
John Allen | ea5509f | 2017-03-17 17:13:43 -0500 | [diff] [blame] | 1575 | { |
John Allen | 6095e59 | 2018-03-30 13:44:21 -0500 | [diff] [blame] | 1576 | struct net_device *netdev = adapter->netdev; |
John Allen | ea5509f | 2017-03-17 17:13:43 -0500 | [diff] [blame] | 1577 | int i; |
| 1578 | |
Nathan Fontenot | 46293b9 | 2017-05-03 14:05:02 -0400 | [diff] [blame] | 1579 | if (adapter->tx_scrq) { |
| 1580 | for (i = 0; i < adapter->req_tx_queues; i++) |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1581 | if (adapter->tx_scrq[i]->irq) { |
Thomas Falcon | f873866 | 2018-03-07 17:51:45 -0600 | [diff] [blame] | 1582 | netdev_dbg(netdev, |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1583 | "Disabling tx_scrq[%d] irq\n", i); |
Thomas Falcon | f23e064 | 2018-04-15 18:53:36 -0500 | [diff] [blame] | 1584 | disable_scrq_irq(adapter, adapter->tx_scrq[i]); |
Nathan Fontenot | 46293b9 | 2017-05-03 14:05:02 -0400 | [diff] [blame] | 1585 | disable_irq(adapter->tx_scrq[i]->irq); |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1586 | } |
Nathan Fontenot | 46293b9 | 2017-05-03 14:05:02 -0400 | [diff] [blame] | 1587 | } |
| 1588 | |
Nathan Fontenot | 46293b9 | 2017-05-03 14:05:02 -0400 | [diff] [blame] | 1589 | if (adapter->rx_scrq) { |
| 1590 | for (i = 0; i < adapter->req_rx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1591 | if (adapter->rx_scrq[i]->irq) { |
Thomas Falcon | f873866 | 2018-03-07 17:51:45 -0600 | [diff] [blame] | 1592 | netdev_dbg(netdev, |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1593 | "Disabling rx_scrq[%d] irq\n", i); |
Thomas Falcon | f23e064 | 2018-04-15 18:53:36 -0500 | [diff] [blame] | 1594 | disable_scrq_irq(adapter, adapter->rx_scrq[i]); |
Nathan Fontenot | 46293b9 | 2017-05-03 14:05:02 -0400 | [diff] [blame] | 1595 | disable_irq(adapter->rx_scrq[i]->irq); |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 1596 | } |
Nathan Fontenot | 46293b9 | 2017-05-03 14:05:02 -0400 | [diff] [blame] | 1597 | } |
| 1598 | } |
John Allen | 6095e59 | 2018-03-30 13:44:21 -0500 | [diff] [blame] | 1599 | } |
| 1600 | |
| 1601 | static void ibmvnic_cleanup(struct net_device *netdev) |
| 1602 | { |
| 1603 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 1604 | |
| 1605 | /* ensure that transmissions are stopped if called by do_reset */ |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 1606 | if (test_bit(0, &adapter->resetting)) |
John Allen | 6095e59 | 2018-03-30 13:44:21 -0500 | [diff] [blame] | 1607 | netif_tx_disable(netdev); |
| 1608 | else |
| 1609 | netif_tx_stop_all_queues(netdev); |
| 1610 | |
| 1611 | ibmvnic_napi_disable(adapter); |
| 1612 | ibmvnic_disable_irqs(adapter); |
Thomas Falcon | 01d9bd7 | 2018-03-07 17:51:46 -0600 | [diff] [blame] | 1613 | } |
| 1614 | |
| 1615 | static int __ibmvnic_close(struct net_device *netdev) |
| 1616 | { |
| 1617 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 1618 | int rc = 0; |
| 1619 | |
| 1620 | adapter->state = VNIC_CLOSING; |
| 1621 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); |
Nathan Fontenot | 90c8014 | 2017-05-03 14:04:32 -0400 | [diff] [blame] | 1622 | adapter->state = VNIC_CLOSED; |
Sukadev Bhattiprolu | d4083d3 | 2021-02-10 17:41:43 -0800 | [diff] [blame] | 1623 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1624 | } |
| 1625 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1626 | static int ibmvnic_close(struct net_device *netdev) |
| 1627 | { |
| 1628 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 1629 | int rc; |
| 1630 | |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 1631 | netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n", |
| 1632 | adapter_state_to_string(adapter->state), |
| 1633 | adapter->failover_pending, |
Sukadev Bhattiprolu | 38bd5ce | 2020-12-04 18:22:35 -0800 | [diff] [blame] | 1634 | adapter->force_reset_recovery); |
| 1635 | |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 1636 | /* If device failover is pending, just set device state and return. |
| 1637 | * Device operation will be handled by the reset routine.
| 1638 | */ |
| 1639 | if (adapter->failover_pending) { |
| 1640 | adapter->state = VNIC_CLOSED; |
| 1641 | return 0; |
| 1642 | } |
| 1643 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1644 | rc = __ibmvnic_close(netdev); |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 1645 | ibmvnic_cleanup(netdev); |
Sukadev Bhattiprolu | 489de95 | 2021-09-14 20:52:58 -0700 | [diff] [blame] | 1646 | clean_rx_pools(adapter); |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 1647 | clean_tx_pools(adapter); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 1648 | |
| 1649 | return rc; |
| 1650 | } |
| 1651 | |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1652 | /** |
| 1653 | * build_hdr_data - creates L2/L3/L4 header data buffer |
Lee Jones | 8070860 | 2021-01-15 20:09:03 +0000 | [diff] [blame] | 1654 | * @hdr_field: bitfield determining needed headers |
| 1655 | * @skb: socket buffer |
| 1656 | * @hdr_len: array of header lengths |
| 1657 | * @hdr_data: buffer to write the header to |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1658 | * |
| 1659 | * Reads hdr_field to determine which headers are needed by firmware. |
| 1660 | * Builds a buffer containing these headers. Saves individual header |
| 1661 | * lengths and total buffer length to be used to build descriptors. |
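| | *
| | * Example (illustrative): for an untagged TCP/IPv4 skb with all three
| | * header types requested in hdr_field, hdr_len becomes {14, 20, 20}
| | * and the function returns 54, the number of bytes copied to hdr_data.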
| 1662 | */ |
| 1663 | static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, |
| 1664 | int *hdr_len, u8 *hdr_data) |
| 1665 | { |
| 1666 | int len = 0; |
| 1667 | u8 *hdr; |
| 1668 | |
Thomas Falcon | da75e3b | 2018-03-12 11:51:02 -0500 | [diff] [blame] | 1669 | if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) |
| 1670 | hdr_len[0] = sizeof(struct vlan_ethhdr); |
| 1671 | else |
| 1672 | hdr_len[0] = sizeof(struct ethhdr); |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1673 | |
| 1674 | if (skb->protocol == htons(ETH_P_IP)) { |
| 1675 | hdr_len[1] = ip_hdr(skb)->ihl * 4; |
| 1676 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
| 1677 | hdr_len[2] = tcp_hdrlen(skb); |
| 1678 | else if (ip_hdr(skb)->protocol == IPPROTO_UDP) |
| 1679 | hdr_len[2] = sizeof(struct udphdr); |
| 1680 | } else if (skb->protocol == htons(ETH_P_IPV6)) { |
| 1681 | hdr_len[1] = sizeof(struct ipv6hdr); |
| 1682 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
| 1683 | hdr_len[2] = tcp_hdrlen(skb); |
| 1684 | else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) |
| 1685 | hdr_len[2] = sizeof(struct udphdr); |
Thomas Falcon | 4eb50ce | 2017-12-18 12:52:40 -0600 | [diff] [blame] | 1686 | } else if (skb->protocol == htons(ETH_P_ARP)) { |
| 1687 | hdr_len[1] = arp_hdr_len(skb->dev); |
| 1688 | hdr_len[2] = 0; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1689 | } |
| 1690 | |
| 1691 | memset(hdr_data, 0, 120); |
| 1692 | if ((hdr_field >> 6) & 1) { |
| 1693 | hdr = skb_mac_header(skb); |
| 1694 | memcpy(hdr_data, hdr, hdr_len[0]); |
| 1695 | len += hdr_len[0]; |
| 1696 | } |
| 1697 | |
| 1698 | if ((hdr_field >> 5) & 1) { |
| 1699 | hdr = skb_network_header(skb); |
| 1700 | memcpy(hdr_data + len, hdr, hdr_len[1]); |
| 1701 | len += hdr_len[1]; |
| 1702 | } |
| 1703 | |
| 1704 | if ((hdr_field >> 4) & 1) { |
| 1705 | hdr = skb_transport_header(skb); |
| 1706 | memcpy(hdr_data + len, hdr, hdr_len[2]); |
| 1707 | len += hdr_len[2]; |
| 1708 | } |
| 1709 | return len; |
| 1710 | } |
| 1711 | |
| 1712 | /** |
| 1713 | * create_hdr_descs - create header and header extension descriptors |
Lee Jones | 8070860 | 2021-01-15 20:09:03 +0000 | [diff] [blame] | 1714 | * @hdr_field: bitfield determining needed headers |
| 1715 | * @hdr_data: buffer containing header data |
| 1716 | * @len: length of data buffer |
| 1717 | * @hdr_len: array of individual header lengths |
| 1718 | * @scrq_arr: descriptor array |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1719 | * |
| 1720 | * Creates header and, if needed, header extension descriptors and |
| 1721 | * places them in a descriptor array, scrq_arr |
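| | *
| | * Example (illustrative): 54 bytes of header data yield one 24-byte
| | * header descriptor plus two extension descriptors carrying 29 and 1
| | * bytes, so the function returns 3.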
| 1722 | */ |
| 1723 | |
Thomas Falcon | 2de0968 | 2017-10-16 10:02:11 -0500 | [diff] [blame] | 1724 | static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, |
| 1725 | union sub_crq *scrq_arr) |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1726 | { |
| 1727 | union sub_crq hdr_desc; |
| 1728 | int tmp_len = len; |
Thomas Falcon | 2de0968 | 2017-10-16 10:02:11 -0500 | [diff] [blame] | 1729 | int num_descs = 0; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1730 | u8 *data, *cur; |
| 1731 | int tmp; |
| 1732 | |
| 1733 | while (tmp_len > 0) { |
| 1734 | cur = hdr_data + len - tmp_len; |
| 1735 | |
| 1736 | memset(&hdr_desc, 0, sizeof(hdr_desc)); |
| 1737 | if (cur != hdr_data) { |
| 1738 | data = hdr_desc.hdr_ext.data; |
| 1739 | tmp = tmp_len > 29 ? 29 : tmp_len; |
| 1740 | hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; |
| 1741 | hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; |
| 1742 | hdr_desc.hdr_ext.len = tmp; |
| 1743 | } else { |
| 1744 | data = hdr_desc.hdr.data; |
| 1745 | tmp = tmp_len > 24 ? 24 : tmp_len; |
| 1746 | hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; |
| 1747 | hdr_desc.hdr.type = IBMVNIC_HDR_DESC; |
| 1748 | hdr_desc.hdr.len = tmp; |
| 1749 | hdr_desc.hdr.l2_len = (u8)hdr_len[0]; |
| 1750 | hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); |
| 1751 | hdr_desc.hdr.l4_len = (u8)hdr_len[2]; |
| 1752 | hdr_desc.hdr.flag = hdr_field << 1; |
| 1753 | } |
| 1754 | memcpy(data, cur, tmp); |
| 1755 | tmp_len -= tmp; |
| 1756 | *scrq_arr = hdr_desc; |
| 1757 | scrq_arr++; |
Thomas Falcon | 2de0968 | 2017-10-16 10:02:11 -0500 | [diff] [blame] | 1758 | num_descs++; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1759 | } |
Thomas Falcon | 2de0968 | 2017-10-16 10:02:11 -0500 | [diff] [blame] | 1760 | |
| 1761 | return num_descs; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1762 | } |
| 1763 | |
| 1764 | /** |
| 1765 | * build_hdr_descs_arr - build a header descriptor array |
Lijun Pan | 73214a6 | 2021-06-11 10:43:39 -0500 | [diff] [blame] | 1766 | * @skb: tx socket buffer |
| 1767 | * @indir_arr: indirect array |
Lee Jones | 8070860 | 2021-01-15 20:09:03 +0000 | [diff] [blame] | 1768 | * @num_entries: number of descriptors to be sent |
| 1769 | * @hdr_field: bit field determining which headers will be sent |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1770 | * |
| 1771 | * This function will build a TX descriptor array with applicable |
| 1772 | * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. |
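| | *
| | * Note: header descriptors are written starting at indir_arr[1]; the
| | * first slot of the indirect array is left for the main tx descriptor
| | * filled in by the caller.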
| 1773 | */ |
| 1774 | |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 1775 | static void build_hdr_descs_arr(struct sk_buff *skb, |
| 1776 | union sub_crq *indir_arr, |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1777 | int *num_entries, u8 hdr_field) |
| 1778 | { |
| 1779 | int hdr_len[3] = {0, 0, 0}; |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 1780 | u8 hdr_data[140] = {0}; |
Thomas Falcon | 2de0968 | 2017-10-16 10:02:11 -0500 | [diff] [blame] | 1781 | int tot_len; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1782 | |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 1783 | tot_len = build_hdr_data(hdr_field, skb, hdr_len, |
| 1784 | hdr_data); |
Thomas Falcon | 2de0968 | 2017-10-16 10:02:11 -0500 | [diff] [blame] | 1785 | *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 1786 | indir_arr + 1); |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1787 | } |
| 1788 | |
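| | /* Example (hypothetical min_mtu): with a 68-byte minimum MTU, a 60-byte
| | * frame is zero-padded to 68 bytes by skb_put_padto() below, which
| | * frees the skb itself and returns an error if padding fails.
| | */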
Thomas Falcon | 1f247a6 | 2018-03-12 11:51:04 -0500 | [diff] [blame] | 1789 | static int ibmvnic_xmit_workarounds(struct sk_buff *skb, |
| 1790 | struct net_device *netdev) |
| 1791 | { |
| 1792 | /* For some backing devices, mishandling of small packets |
| 1793 | * can result in a loss of connection or TX stall. Device |
| 1794 | * architects recommend that no packet should be smaller |
| 1795 | * than the minimum MTU value provided to the driver, so |
| 1796 | * pad any packets to that length |
| 1797 | */ |
| 1798 | if (skb->len < netdev->min_mtu) |
| 1799 | return skb_put_padto(skb, netdev->min_mtu); |
Thomas Falcon | 7083a45 | 2018-03-12 21:05:26 -0500 | [diff] [blame] | 1800 | |
| 1801 | return 0; |
Thomas Falcon | 1f247a6 | 2018-03-12 11:51:04 -0500 | [diff] [blame] | 1802 | } |
| 1803 | |
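| | /* Unwind tx descriptors still sitting in the indirect buffer after a
| | * failed flush: walk them newest-to-oldest, roll the pool's
| | * consumer_index back one slot per buffer, return each index to
| | * free_map, and free the skb while correcting the tx statistics.
| | */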
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1804 | static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, |
| 1805 | struct ibmvnic_sub_crq_queue *tx_scrq) |
| 1806 | { |
| 1807 | struct ibmvnic_ind_xmit_queue *ind_bufp; |
| 1808 | struct ibmvnic_tx_buff *tx_buff; |
| 1809 | struct ibmvnic_tx_pool *tx_pool; |
| 1810 | union sub_crq tx_scrq_entry; |
| 1811 | int queue_num; |
| 1812 | int entries; |
| 1813 | int index; |
| 1814 | int i; |
| 1815 | |
| 1816 | ind_bufp = &tx_scrq->ind_buf; |
| 1817 | entries = (u64)ind_bufp->index; |
| 1818 | queue_num = tx_scrq->pool_index; |
| 1819 | |
| 1820 | for (i = entries - 1; i >= 0; --i) { |
| 1821 | tx_scrq_entry = ind_bufp->indir_arr[i]; |
| 1822 | if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) |
| 1823 | continue; |
| 1824 | index = be32_to_cpu(tx_scrq_entry.v1.correlator); |
| 1825 | if (index & IBMVNIC_TSO_POOL_MASK) { |
| 1826 | tx_pool = &adapter->tso_pool[queue_num]; |
| 1827 | index &= ~IBMVNIC_TSO_POOL_MASK; |
| 1828 | } else { |
| 1829 | tx_pool = &adapter->tx_pool[queue_num]; |
| 1830 | } |
| 1831 | tx_pool->free_map[tx_pool->consumer_index] = index; |
| 1832 | tx_pool->consumer_index = tx_pool->consumer_index == 0 ? |
| 1833 | tx_pool->num_buffers - 1 : |
| 1834 | tx_pool->consumer_index - 1; |
| 1835 | tx_buff = &tx_pool->tx_buff[index]; |
| 1836 | adapter->netdev->stats.tx_packets--; |
| 1837 | adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; |
| 1838 | adapter->tx_stats_buffers[queue_num].packets--; |
| 1839 | adapter->tx_stats_buffers[queue_num].bytes -= |
| 1840 | tx_buff->skb->len; |
| 1841 | dev_kfree_skb_any(tx_buff->skb); |
| 1842 | tx_buff->skb = NULL; |
| 1843 | adapter->netdev->stats.tx_dropped++; |
| 1844 | } |
| 1845 | ind_bufp->index = 0; |
| 1846 | if (atomic_sub_return(entries, &tx_scrq->used) <= |
| 1847 | (adapter->req_tx_entries_per_subcrq / 2) && |
Sukadev Bhattiprolu | 65d6470 | 2021-06-23 21:13:12 -0700 | [diff] [blame] | 1848 | __netif_subqueue_stopped(adapter->netdev, queue_num) && |
| 1849 | !test_bit(0, &adapter->resetting)) { |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1850 | netif_wake_subqueue(adapter->netdev, queue_num); |
| 1851 | netdev_dbg(adapter->netdev, "Started queue %d\n", |
| 1852 | queue_num); |
| 1853 | } |
| 1854 | } |
| 1855 | |
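| | /* Push the batched tx descriptors to the VIOS in a single indirect
| | * sub-CRQ send; on failure the whole batch is unwound via
| | * ibmvnic_tx_scrq_clean_buffer() and the error is absorbed here.
| | */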
| 1856 | static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, |
| 1857 | struct ibmvnic_sub_crq_queue *tx_scrq) |
| 1858 | { |
| 1859 | struct ibmvnic_ind_xmit_queue *ind_bufp; |
| 1860 | u64 dma_addr; |
| 1861 | u64 entries; |
| 1862 | u64 handle; |
| 1863 | int rc; |
| 1864 | |
| 1865 | ind_bufp = &tx_scrq->ind_buf; |
| 1866 | dma_addr = (u64)ind_bufp->indir_dma; |
| 1867 | entries = (u64)ind_bufp->index; |
| 1868 | handle = tx_scrq->handle; |
| 1869 | |
| 1870 | if (!entries) |
| 1871 | return 0; |
| 1872 | rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); |
| 1873 | if (rc) |
| 1874 | ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); |
| 1875 | else |
| 1876 | ind_bufp->index = 0; |
| 1877 | return 0; |
| 1878 | } |
| 1879 | |
YueHaibing | 94b2bb2 | 2018-09-18 14:35:47 +0800 | [diff] [blame] | 1880 | static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1881 | { |
| 1882 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 1883 | int queue_num = skb_get_queue_mapping(skb); |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1884 | u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1885 | struct device *dev = &adapter->vdev->dev; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1886 | struct ibmvnic_ind_xmit_queue *ind_bufp; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1887 | struct ibmvnic_tx_buff *tx_buff = NULL; |
Thomas Falcon | 142c0ac | 2017-03-05 12:18:41 -0600 | [diff] [blame] | 1888 | struct ibmvnic_sub_crq_queue *tx_scrq; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1889 | struct ibmvnic_tx_pool *tx_pool; |
| 1890 | unsigned int tx_send_failed = 0; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1891 | netdev_tx_t ret = NETDEV_TX_OK; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1892 | unsigned int tx_map_failed = 0; |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 1893 | union sub_crq indir_arr[16]; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1894 | unsigned int tx_dropped = 0; |
| 1895 | unsigned int tx_packets = 0; |
| 1896 | unsigned int tx_bytes = 0; |
| 1897 | dma_addr_t data_dma_addr; |
| 1898 | struct netdev_queue *txq; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1899 | unsigned long lpar_rc; |
| 1900 | union sub_crq tx_crq; |
| 1901 | unsigned int offset; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 1902 | int num_entries = 1; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1903 | unsigned char *dst; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1904 | int index = 0; |
Thomas Falcon | a0dca10 | 2018-01-18 19:29:48 -0600 | [diff] [blame] | 1905 | u8 proto = 0; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1906 | |
| 1907 | tx_scrq = adapter->tx_scrq[queue_num]; |
| 1908 | txq = netdev_get_tx_queue(netdev, queue_num); |
| 1909 | ind_bufp = &tx_scrq->ind_buf; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1910 | |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 1911 | if (test_bit(0, &adapter->resetting)) { |
Thomas Falcon | 7f5b030 | 2017-04-21 15:39:16 -0400 | [diff] [blame] | 1912 | dev_kfree_skb_any(skb); |
| 1913 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1914 | tx_send_failed++; |
| 1915 | tx_dropped++; |
Thomas Falcon | 7f5b030 | 2017-04-21 15:39:16 -0400 | [diff] [blame] | 1916 | ret = NETDEV_TX_OK; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1917 | goto out; |
| 1918 | } |
| 1919 | |
Thomas Falcon | 7083a45 | 2018-03-12 21:05:26 -0500 | [diff] [blame] | 1920 | if (ibmvnic_xmit_workarounds(skb, netdev)) { |
Thomas Falcon | 1f247a6 | 2018-03-12 11:51:04 -0500 | [diff] [blame] | 1921 | tx_dropped++; |
| 1922 | tx_send_failed++; |
| 1923 | ret = NETDEV_TX_OK; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 1924 | ibmvnic_tx_scrq_flush(adapter, tx_scrq); |
Thomas Falcon | 1f247a6 | 2018-03-12 11:51:04 -0500 | [diff] [blame] | 1925 | goto out; |
| 1926 | } |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 1927 | if (skb_is_gso(skb)) |
| 1928 | tx_pool = &adapter->tso_pool[queue_num]; |
| 1929 | else |
| 1930 | tx_pool = &adapter->tx_pool[queue_num]; |
Thomas Falcon | 1f247a6 | 2018-03-12 11:51:04 -0500 | [diff] [blame] | 1931 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1932 | index = tx_pool->free_map[tx_pool->consumer_index]; |
Thomas Falcon | fdb0610 | 2017-10-17 12:36:55 -0500 | [diff] [blame] | 1933 | |
Thomas Falcon | 86b61a5 | 2018-03-16 20:00:29 -0500 | [diff] [blame] | 1934 | if (index == IBMVNIC_INVALID_MAP) { |
| 1935 | dev_kfree_skb_any(skb); |
| 1936 | tx_send_failed++; |
| 1937 | tx_dropped++; |
Sukadev Bhattiprolu | bb55362 | 2021-07-20 19:34:39 -0700 | [diff] [blame] | 1938 | ibmvnic_tx_scrq_flush(adapter, tx_scrq); |
Thomas Falcon | 86b61a5 | 2018-03-16 20:00:29 -0500 | [diff] [blame] | 1939 | ret = NETDEV_TX_OK; |
| 1940 | goto out; |
| 1941 | } |
| 1942 | |
| 1943 | tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; |
| 1944 | |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 1945 | offset = index * tx_pool->buf_size; |
| 1946 | dst = tx_pool->long_term_buff.buff + offset; |
| 1947 | memset(dst, 0, tx_pool->buf_size); |
| 1948 | data_dma_addr = tx_pool->long_term_buff.addr + offset; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1949 | |
Thomas Falcon | 1548205 | 2017-10-17 12:36:54 -0500 | [diff] [blame] | 1950 | if (skb_shinfo(skb)->nr_frags) { |
| 1951 | int cur, i; |
| 1952 | |
| 1953 | /* Copy the head */ |
| 1954 | skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); |
| 1955 | cur = skb_headlen(skb); |
| 1956 | |
| 1957 | /* Copy the frags */ |
| 1958 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1959 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 1960 | |
Christophe JAILLET | c3105f8 | 2021-04-04 10:54:37 +0200 | [diff] [blame] | 1961 | memcpy(dst + cur, skb_frag_address(frag), |
| 1962 | skb_frag_size(frag)); |
Thomas Falcon | 1548205 | 2017-10-17 12:36:54 -0500 | [diff] [blame] | 1963 | cur += skb_frag_size(frag); |
| 1964 | } |
| 1965 | } else { |
| 1966 | skb_copy_from_linear_data(skb, dst, skb->len); |
| 1967 | } |
| 1968 | |
Lijun Pan | 42557da | 2021-02-12 20:48:40 -0600 | [diff] [blame] | 1969 | /* post changes to long_term_buff *dst before VIOS accesses it */ |
| 1970 | dma_wmb(); |
| 1971 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1972 | tx_pool->consumer_index = |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 1973 | (tx_pool->consumer_index + 1) % tx_pool->num_buffers; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1974 | |
| 1975 | tx_buff = &tx_pool->tx_buff[index]; |
| 1976 | tx_buff->skb = skb; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1977 | tx_buff->index = index; |
| 1978 | tx_buff->pool_index = queue_num; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1979 | |
| 1980 | memset(&tx_crq, 0, sizeof(tx_crq)); |
| 1981 | tx_crq.v1.first = IBMVNIC_CRQ_CMD; |
| 1982 | tx_crq.v1.type = IBMVNIC_TX_DESC; |
| 1983 | tx_crq.v1.n_crq_elem = 1; |
| 1984 | tx_crq.v1.n_sge = 1; |
| 1985 | tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 1986 | |
Thomas Falcon | fdb0610 | 2017-10-17 12:36:55 -0500 | [diff] [blame] | 1987 | if (skb_is_gso(skb)) |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 1988 | tx_crq.v1.correlator = |
| 1989 | cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); |
Thomas Falcon | fdb0610 | 2017-10-17 12:36:55 -0500 | [diff] [blame] | 1990 | else |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 1991 | tx_crq.v1.correlator = cpu_to_be32(index); |
| 1992 | tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1993 | tx_crq.v1.sge_len = cpu_to_be32(skb->len); |
| 1994 | tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); |
| 1995 | |
Michał Mirosław | e84b479 | 2018-11-07 17:50:52 +0100 | [diff] [blame] | 1996 | if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 1997 | tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; |
| 1998 | tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); |
| 1999 | } |
| 2000 | |
| 2001 | if (skb->protocol == htons(ETH_P_IP)) { |
Thomas Falcon | a0dca10 | 2018-01-18 19:29:48 -0600 | [diff] [blame] | 2002 | tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; |
| 2003 | proto = ip_hdr(skb)->protocol; |
| 2004 | } else if (skb->protocol == htons(ETH_P_IPV6)) { |
| 2005 | tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; |
| 2006 | proto = ipv6_hdr(skb)->nexthdr; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2007 | } |
| 2008 | |
Thomas Falcon | a0dca10 | 2018-01-18 19:29:48 -0600 | [diff] [blame] | 2009 | if (proto == IPPROTO_TCP) |
| 2010 | tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; |
| 2011 | else if (proto == IPPROTO_UDP) |
| 2012 | tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; |
| 2013 | |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 2014 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2015 | tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 2016 | hdrs += 2; |
| 2017 | } |
Thomas Falcon | fdb0610 | 2017-10-17 12:36:55 -0500 | [diff] [blame] | 2018 | if (skb_is_gso(skb)) { |
| 2019 | tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; |
| 2020 | tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); |
| 2021 | hdrs += 2; |
| 2022 | } |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 2023 | |
| 2024 | if ((*hdrs >> 7) & 1) |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 2025 | build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 2026 | |
| 2027 | tx_crq.v1.n_crq_elem = num_entries; |
| 2028 | tx_buff->num_entries = num_entries; |
| 2029 | /* flush the buffer if the current entry cannot fit */ |
| 2030 | if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { |
| 2031 | lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); |
| 2032 | if (lpar_rc != H_SUCCESS) |
| 2033 | goto tx_flush_err; |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 2034 | } |
Thomas Falcon | 7f5b030 | 2017-04-21 15:39:16 -0400 | [diff] [blame] | 2035 | |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 2036 | indir_arr[0] = tx_crq; |
| 2037 | memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 2038 | num_entries * sizeof(struct ibmvnic_generic_scrq)); |
| 2039 | ind_bufp->index += num_entries; |
| 2040 | if (__netdev_tx_sent_queue(txq, skb->len, |
| 2041 | netdev_xmit_more() && |
| 2042 | ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { |
| 2043 | lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); |
| 2044 | if (lpar_rc != H_SUCCESS) |
| 2045 | goto tx_err; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2046 | } |
Thomas Falcon | 142c0ac | 2017-03-05 12:18:41 -0600 | [diff] [blame] | 2047 | |
Thomas Falcon | ffc385b | 2018-02-18 10:08:41 -0600 | [diff] [blame] | 2048 | if (atomic_add_return(num_entries, &tx_scrq->used) |
Brian King | 58c8c0c | 2017-04-19 13:44:47 -0400 | [diff] [blame] | 2049 | >= adapter->req_tx_entries_per_subcrq) { |
Thomas Falcon | 0aecb13 | 2018-02-26 18:10:58 -0600 | [diff] [blame] | 2050 | netdev_dbg(netdev, "Stopping queue %d\n", queue_num); |
Thomas Falcon | 142c0ac | 2017-03-05 12:18:41 -0600 | [diff] [blame] | 2051 | netif_stop_subqueue(netdev, queue_num); |
| 2052 | } |
| 2053 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2054 | tx_packets++; |
| 2055 | tx_bytes += skb->len; |
Eric Dumazet | 5337824 | 2021-11-16 19:29:22 -0800 | [diff] [blame] | 2056 | txq_trans_cond_update(txq); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2057 | ret = NETDEV_TX_OK; |
Thomas Falcon | 86b61a5 | 2018-03-16 20:00:29 -0500 | [diff] [blame] | 2058 | goto out; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2059 | |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 2060 | tx_flush_err: |
| 2061 | dev_kfree_skb_any(skb); |
| 2062 | tx_buff->skb = NULL; |
| 2063 | tx_pool->consumer_index = tx_pool->consumer_index == 0 ? |
| 2064 | tx_pool->num_buffers - 1 : |
| 2065 | tx_pool->consumer_index - 1; |
| 2066 | tx_dropped++; |
| 2067 | tx_err: |
| 2068 | if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) |
| 2069 | dev_err_ratelimited(dev, "tx: send failed\n"); |
| 2070 | |
| 2071 | if (lpar_rc == H_CLOSED || adapter->failover_pending) { |
| 2072 | /* Disable TX and report carrier off if the queue is closed |
| 2073 | * or a failover is pending. |
| 2074 | * Firmware guarantees that a signal will be sent to the |
| 2075 | * driver, triggering a reset or some other action. |
| 2076 | */ |
| 2077 | netif_tx_stop_all_queues(netdev); |
| 2078 | netif_carrier_off(netdev); |
| 2079 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2080 | out: |
| 2081 | netdev->stats.tx_dropped += tx_dropped; |
| 2082 | netdev->stats.tx_bytes += tx_bytes; |
| 2083 | netdev->stats.tx_packets += tx_packets; |
| 2084 | adapter->tx_send_failed += tx_send_failed; |
| 2085 | adapter->tx_map_failed += tx_map_failed; |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 2086 | adapter->tx_stats_buffers[queue_num].packets += tx_packets; |
| 2087 | adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; |
| 2088 | adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2089 | |
| 2090 | return ret; |
| 2091 | } |
| 2092 | |
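/* The transmit path above batches descriptors into the sub-CRQ's
 * indirect buffer instead of issuing one hypervisor call per skb: each
 * packet's descriptors are appended to ind_bufp, and the buffer is
 * flushed either when the next packet's entries would overflow
 * IBMVNIC_MAX_IND_DESCS or when the stack indicates that no further
 * packets are queued (netdev_xmit_more()). A minimal sketch of that
 * decision, using a hypothetical helper with the ibmvnic state reduced
 * to plain counters:
 *
 *	static bool batch_needs_flush(int pending, int new_entries,
 *				      bool xmit_more)
 *	{
 *		if (pending + new_entries > IBMVNIC_MAX_IND_DESCS)
 *			return true;	(no room for this packet)
 *		return !xmit_more;	(last packet for now, kick hw)
 *	}
 *
 * Batching this way amortizes the cost of the indirect-send hypercall
 * across several packets under load.
 */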
| 2093 | static void ibmvnic_set_multi(struct net_device *netdev) |
| 2094 | { |
| 2095 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 2096 | struct netdev_hw_addr *ha; |
| 2097 | union ibmvnic_crq crq; |
| 2098 | |
| 2099 | memset(&crq, 0, sizeof(crq)); |
| 2100 | crq.request_capability.first = IBMVNIC_CRQ_CMD; |
| 2101 | crq.request_capability.cmd = REQUEST_CAPABILITY; |
| 2102 | |
| 2103 | if (netdev->flags & IFF_PROMISC) { |
| 2104 | if (!adapter->promisc_supported) |
| 2105 | return; |
| 2106 | } else { |
| 2107 | if (netdev->flags & IFF_ALLMULTI) { |
| 2108 | /* Accept all multicast */ |
| 2109 | memset(&crq, 0, sizeof(crq)); |
| 2110 | crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; |
| 2111 | crq.multicast_ctrl.cmd = MULTICAST_CTRL; |
| 2112 | crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; |
| 2113 | ibmvnic_send_crq(adapter, &crq); |
| 2114 | } else if (netdev_mc_empty(netdev)) { |
| 2115 | /* Reject all multicast */ |
| 2116 | memset(&crq, 0, sizeof(crq)); |
| 2117 | crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; |
| 2118 | crq.multicast_ctrl.cmd = MULTICAST_CTRL; |
| 2119 | crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; |
| 2120 | ibmvnic_send_crq(adapter, &crq); |
| 2121 | } else { |
| 2122 | /* Accept one or more multicast addresses */ |
| 2123 | netdev_for_each_mc_addr(ha, netdev) { |
| 2124 | memset(&crq, 0, sizeof(crq)); |
| 2125 | crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; |
| 2126 | crq.multicast_ctrl.cmd = MULTICAST_CTRL; |
| 2127 | crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; |
| 2128 | ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], |
| 2129 | ha->addr); |
| 2130 | ibmvnic_send_crq(adapter, &crq); |
| 2131 | } |
| 2132 | } |
| 2133 | } |
| 2134 | } |
| 2135 | |
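/* ibmvnic_set_multi() above shows the command pattern used for most
 * CRQ requests in this driver: zero the crq union, mark the descriptor
 * valid with IBMVNIC_CRQ_CMD, select the command, fill in its
 * arguments, and hand it to ibmvnic_send_crq(). As a minimal sketch,
 * with a hypothetical helper name:
 *
 *	static int send_disable_all_multicast(struct ibmvnic_adapter *adapter)
 *	{
 *		union ibmvnic_crq crq;
 *
 *		memset(&crq, 0, sizeof(crq));
 *		crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 *		crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 *		crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
 *		return ibmvnic_send_crq(adapter, &crq);
 *	}
 */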
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2136 | static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2137 | { |
| 2138 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2139 | union ibmvnic_crq crq; |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 2140 | int rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2141 | |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2142 | if (!is_valid_ether_addr(dev_addr)) { |
| 2143 | rc = -EADDRNOTAVAIL; |
| 2144 | goto err; |
| 2145 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2146 | |
| 2147 | memset(&crq, 0, sizeof(crq)); |
| 2148 | crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; |
| 2149 | crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2150 | ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 2151 | |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 2152 | mutex_lock(&adapter->fw_lock); |
| 2153 | adapter->fw_done_rc = 0; |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 2154 | reinit_completion(&adapter->fw_done); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 2155 | |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 2156 | rc = ibmvnic_send_crq(adapter, &crq); |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2157 | if (rc) { |
| 2158 | rc = -EIO; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 2159 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2160 | goto err; |
| 2161 | } |
| 2162 | |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 2163 | rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2164 | /* netdev->dev_addr is changed in the handle_change_mac_rsp() function */ |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 2165 | if (rc || adapter->fw_done_rc) { |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2166 | rc = -EIO; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 2167 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2168 | goto err; |
| 2169 | } |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 2170 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2171 | return 0; |
| 2172 | err: |
| 2173 | ether_addr_copy(adapter->mac_addr, netdev->dev_addr); |
| 2174 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2175 | } |
| 2176 | |
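/* __ibmvnic_set_mac() above follows the driver's synchronous firmware
 * handshake: take fw_lock, clear fw_done_rc, re-arm the fw_done
 * completion, send the CRQ, then block with a timeout until the CRQ
 * response handler calls complete(). Reduced to its skeleton, with
 * send_request() standing in for any CRQ send (a hypothetical name):
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = send_request(adapter);
 *	if (!rc)
 *		rc = ibmvnic_wait_for_completion(adapter,
 *						 &adapter->fw_done, 10000);
 *	mutex_unlock(&adapter->fw_lock);
 *
 * fw_lock serializes requesters so that a late response cannot
 * complete some other caller's wait.
 */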
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2177 | static int ibmvnic_set_mac(struct net_device *netdev, void *p) |
| 2178 | { |
| 2179 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 2180 | struct sockaddr *addr = p; |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 2181 | int rc; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2182 | |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2183 | rc = 0; |
Lijun Pan | 8fc3672 | 2020-10-27 17:04:56 -0500 | [diff] [blame] | 2184 | if (!is_valid_ether_addr(addr->sa_data)) |
| 2185 | return -EADDRNOTAVAIL; |
| 2186 | |
Jiri Wiesner | 67eb211 | 2021-03-04 17:18:28 +0100 | [diff] [blame] | 2187 | ether_addr_copy(adapter->mac_addr, addr->sa_data); |
| 2188 | if (adapter->state != VNIC_PROBED) |
Thomas Falcon | 62740e9 | 2019-05-09 23:13:43 -0500 | [diff] [blame] | 2189 | rc = __ibmvnic_set_mac(netdev, addr->sa_data); |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2190 | |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 2191 | return rc; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2192 | } |
| 2193 | |
Lijun Pan | caee7bf | 2021-04-12 02:41:27 -0500 | [diff] [blame] | 2194 | static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) |
| 2195 | { |
| 2196 | switch (reason) { |
| 2197 | case VNIC_RESET_FAILOVER: |
| 2198 | return "FAILOVER"; |
| 2199 | case VNIC_RESET_MOBILITY: |
| 2200 | return "MOBILITY"; |
| 2201 | case VNIC_RESET_FATAL: |
| 2202 | return "FATAL"; |
| 2203 | case VNIC_RESET_NON_FATAL: |
| 2204 | return "NON_FATAL"; |
| 2205 | case VNIC_RESET_TIMEOUT: |
| 2206 | return "TIMEOUT"; |
| 2207 | case VNIC_RESET_CHANGE_PARAM: |
| 2208 | return "CHANGE_PARAM"; |
Lijun Pan | 822ebc2 | 2021-06-11 10:35:37 -0500 | [diff] [blame] | 2209 | case VNIC_RESET_PASSIVE_INIT: |
| 2210 | return "PASSIVE_INIT"; |
Lijun Pan | caee7bf | 2021-04-12 02:41:27 -0500 | [diff] [blame] | 2211 | } |
Michal Suchanek | 07b5dc1 | 2021-05-20 08:50:34 +0200 | [diff] [blame] | 2212 | return "UNKNOWN"; |
Lijun Pan | caee7bf | 2021-04-12 02:41:27 -0500 | [diff] [blame] | 2213 | } |
| 2214 | |
Lee Jones | 8070860 | 2021-01-15 20:09:03 +0000 | [diff] [blame] | 2215 | /* |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2216 | * do_reset returns zero if we are able to keep processing reset events, or |
| 2217 | * non-zero if we hit a fatal error and must halt. |
| 2218 | */ |
| 2219 | static int do_reset(struct ibmvnic_adapter *adapter, |
| 2220 | struct ibmvnic_rwi *rwi, u32 reset_state) |
| 2221 | { |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 2222 | struct net_device *netdev = adapter->netdev; |
John Allen | 896d869 | 2018-01-18 16:26:31 -0600 | [diff] [blame] | 2223 | u64 old_num_rx_queues, old_num_tx_queues; |
Thomas Falcon | 5bf032e | 2018-11-21 11:17:59 -0600 | [diff] [blame] | 2224 | u64 old_num_rx_slots, old_num_tx_slots; |
Lijun Pan | d3a6abc | 2021-04-14 02:46:15 -0500 | [diff] [blame] | 2225 | int rc; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2226 | |
Sukadev Bhattiprolu | 38bd5ce | 2020-12-04 18:22:35 -0800 | [diff] [blame] | 2227 | netdev_dbg(adapter->netdev, |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 2228 | "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n", |
| 2229 | adapter_state_to_string(adapter->state), |
| 2230 | adapter->failover_pending, |
| 2231 | reset_reason_to_string(rwi->reset_reason), |
| 2232 | adapter_state_to_string(reset_state)); |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 2233 | |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2234 | adapter->reset_reason = rwi->reset_reason; |
| 2235 | /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */ |
| 2236 | if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) |
| 2237 | rtnl_lock(); |
| 2238 | |
Lijun Pan | bab08be | 2021-02-11 00:43:19 -0600 | [diff] [blame] | 2239 | /* Now that we have the rtnl lock, clear any pending failover. |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 2240 | * This will ensure ibmvnic_open() has either completed or will |
| 2241 | * block until failover is complete. |
| 2242 | */ |
| 2243 | if (rwi->reset_reason == VNIC_RESET_FAILOVER) |
| 2244 | adapter->failover_pending = false; |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2245 | |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 2246 | /* read the state and check (again) after getting rtnl */ |
| 2247 | reset_state = adapter->state; |
| 2248 | |
| 2249 | if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { |
| 2250 | rc = -EBUSY; |
| 2251 | goto out; |
| 2252 | } |
| 2253 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2254 | netif_carrier_off(netdev); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2255 | |
John Allen | 896d869 | 2018-01-18 16:26:31 -0600 | [diff] [blame] | 2256 | old_num_rx_queues = adapter->req_rx_queues; |
| 2257 | old_num_tx_queues = adapter->req_tx_queues; |
Thomas Falcon | 5bf032e | 2018-11-21 11:17:59 -0600 | [diff] [blame] | 2258 | old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; |
| 2259 | old_num_tx_slots = adapter->req_tx_entries_per_subcrq; |
John Allen | 896d869 | 2018-01-18 16:26:31 -0600 | [diff] [blame] | 2260 | |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 2261 | ibmvnic_cleanup(netdev); |
| 2262 | |
Thomas Falcon | 1f94608 | 2019-06-07 16:03:53 -0500 | [diff] [blame] | 2263 | if (reset_state == VNIC_OPEN && |
| 2264 | adapter->reset_reason != VNIC_RESET_MOBILITY && |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 2265 | adapter->reset_reason != VNIC_RESET_FAILOVER) { |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2266 | if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { |
| 2267 | rc = __ibmvnic_close(netdev); |
| 2268 | if (rc) |
| 2269 | goto out; |
| 2270 | } else { |
| 2271 | adapter->state = VNIC_CLOSING; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2272 | |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2273 | /* Release the RTNL lock before link state change and |
| 2274 | * re-acquire after the link state change to allow |
| 2275 | * linkwatch_event to grab the RTNL lock and run during |
| 2276 | * a reset. |
| 2277 | */ |
| 2278 | rtnl_unlock(); |
| 2279 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); |
| 2280 | rtnl_lock(); |
| 2281 | if (rc) |
| 2282 | goto out; |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2283 | |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 2284 | if (adapter->state == VNIC_OPEN) { |
| 2285 | /* When we dropped rtnl, ibmvnic_open() got |
| 2286 | * it and noticed that we are resetting and |
| 2287 | * set the adapter state to OPEN. Update our |
| 2288 | * new "target" state, and resume the reset |
| 2289 | * from VNIC_CLOSING state. |
| 2290 | */ |
| 2291 | netdev_dbg(netdev, |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 2292 | "Open changed state from %s, updating.\n", |
| 2293 | adapter_state_to_string(reset_state)); |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 2294 | reset_state = VNIC_OPEN; |
| 2295 | adapter->state = VNIC_CLOSING; |
| 2296 | } |
| 2297 | |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2298 | if (adapter->state != VNIC_CLOSING) { |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 2299 | /* If someone else changed the adapter state |
| 2300 | * when we dropped the rtnl, fail the reset |
| 2301 | */ |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 2302 | rc = -EAGAIN; |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2303 | goto out; |
| 2304 | } |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2305 | adapter->state = VNIC_CLOSED; |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2306 | } |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2307 | } |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2308 | |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2309 | if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { |
| 2310 | release_resources(adapter); |
| 2311 | release_sub_crqs(adapter, 1); |
| 2312 | release_crq_queue(adapter); |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2313 | } |
| 2314 | |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 2315 | if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { |
| 2316 | /* remove the closed state so that when we call open it appears |
| 2317 | * we are coming from the probed state. |
| 2318 | */ |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2319 | adapter->state = VNIC_PROBED; |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 2320 | |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2321 | if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { |
| 2322 | rc = init_crq_queue(adapter); |
| 2323 | } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 2324 | rc = ibmvnic_reenable_crq_queue(adapter); |
| 2325 | release_sub_crqs(adapter, 1); |
| 2326 | } else { |
| 2327 | rc = ibmvnic_reset_crq(adapter); |
Dany Madden | 8b40eb73 | 2020-06-18 15:24:13 -0400 | [diff] [blame] | 2328 | if (rc == H_CLOSED || rc == H_SUCCESS) { |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 2329 | rc = vio_enable_interrupts(adapter->vdev); |
Dany Madden | 8b40eb73 | 2020-06-18 15:24:13 -0400 | [diff] [blame] | 2330 | if (rc) |
| 2331 | netdev_err(adapter->netdev, |
| 2332 | "Reset failed to enable interrupts. rc=%d\n", |
| 2333 | rc); |
| 2334 | } |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 2335 | } |
| 2336 | |
| 2337 | if (rc) { |
| 2338 | netdev_err(adapter->netdev, |
Dany Madden | 8b40eb73 | 2020-06-18 15:24:13 -0400 | [diff] [blame] | 2339 | "Reset couldn't initialize crq. rc=%d\n", rc); |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2340 | goto out; |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 2341 | } |
| 2342 | |
Lijun Pan | 635e442 | 2020-08-19 17:52:26 -0500 | [diff] [blame] | 2343 | rc = ibmvnic_reset_init(adapter, true); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 2344 | if (rc) |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2345 | goto out; |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 2346 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 2347 | /* If the adapter was in PROBE or DOWN state prior to the reset, |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 2348 | * exit here. |
| 2349 | */ |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 2350 | if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) { |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2351 | rc = 0; |
| 2352 | goto out; |
| 2353 | } |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 2354 | |
| 2355 | rc = ibmvnic_login(netdev); |
Lijun Pan | f78afaa | 2021-02-11 00:43:20 -0600 | [diff] [blame] | 2356 | if (rc) |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2357 | goto out; |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 2358 | |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2359 | if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { |
| 2360 | rc = init_resources(adapter); |
| 2361 | if (rc) |
| 2362 | goto out; |
| 2363 | } else if (adapter->req_rx_queues != old_num_rx_queues || |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2364 | adapter->req_tx_queues != old_num_tx_queues || |
| 2365 | adapter->req_rx_add_entries_per_subcrq != |
| 2366 | old_num_rx_slots || |
| 2367 | adapter->req_tx_entries_per_subcrq != |
Mingming Cao | 9f13457 | 2020-08-25 13:26:41 -0400 | [diff] [blame] | 2368 | old_num_tx_slots || |
| 2369 | !adapter->rx_pool || |
| 2370 | !adapter->tso_pool || |
| 2371 | !adapter->tx_pool) { |
Juliet Kim | a5681e2 | 2018-11-19 15:59:22 -0600 | [diff] [blame] | 2372 | release_napi(adapter); |
| 2373 | release_vpd_data(adapter); |
| 2374 | |
| 2375 | rc = init_resources(adapter); |
Thomas Falcon | f611a5b | 2018-08-30 13:19:53 -0500 | [diff] [blame] | 2376 | if (rc) |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2377 | goto out; |
Nathan Fontenot | d9043c1 | 2018-02-19 13:30:14 -0600 | [diff] [blame] | 2378 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2379 | } else { |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 2380 | rc = init_tx_pools(netdev); |
Jakub Kicinski | 8ae4dff | 2020-09-04 21:07:49 -0700 | [diff] [blame] | 2381 | if (rc) { |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 2382 | netdev_dbg(netdev, |
| 2383 | "init tx pools failed (%d)\n", |
Lijun Pan | 91dc5d2 | 2021-02-11 00:43:22 -0600 | [diff] [blame] | 2384 | rc); |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2385 | goto out; |
Jakub Kicinski | 8ae4dff | 2020-09-04 21:07:49 -0700 | [diff] [blame] | 2386 | } |
Nathan Fontenot | 8c0543a | 2017-05-26 10:31:06 -0400 | [diff] [blame] | 2387 | |
Sukadev Bhattiprolu | 489de95 | 2021-09-14 20:52:58 -0700 | [diff] [blame] | 2388 | rc = init_rx_pools(netdev); |
Jakub Kicinski | 8ae4dff | 2020-09-04 21:07:49 -0700 | [diff] [blame] | 2389 | if (rc) { |
Sukadev Bhattiprolu | 489de95 | 2021-09-14 20:52:58 -0700 | [diff] [blame] | 2390 | netdev_dbg(netdev, |
| 2391 | "init rx pools failed (%d)\n", |
Lijun Pan | 91dc5d2 | 2021-02-11 00:43:22 -0600 | [diff] [blame] | 2392 | rc); |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2393 | goto out; |
Jakub Kicinski | 8ae4dff | 2020-09-04 21:07:49 -0700 | [diff] [blame] | 2394 | } |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2395 | } |
Thomas Falcon | 134bbe7 | 2018-05-16 15:49:04 -0500 | [diff] [blame] | 2396 | ibmvnic_disable_irqs(adapter); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2397 | } |
John Allen | e676d81 | 2018-03-14 10:41:29 -0500 | [diff] [blame] | 2398 | adapter->state = VNIC_CLOSED; |
| 2399 | |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2400 | if (reset_state == VNIC_CLOSED) { |
| 2401 | rc = 0; |
| 2402 | goto out; |
| 2403 | } |
John Allen | e676d81 | 2018-03-14 10:41:29 -0500 | [diff] [blame] | 2404 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2405 | rc = __ibmvnic_open(netdev); |
| 2406 | if (rc) { |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2407 | rc = IBMVNIC_OPEN_FAILED; |
| 2408 | goto out; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2409 | } |
| 2410 | |
Thomas Falcon | be32a24 | 2019-06-07 16:03:54 -0500 | [diff] [blame] | 2411 | /* refresh device's multicast list */ |
| 2412 | ibmvnic_set_multi(netdev); |
| 2413 | |
Lijun Pan | 98025bc | 2020-11-20 16:40:12 -0600 | [diff] [blame] | 2414 | if (adapter->reset_reason == VNIC_RESET_FAILOVER || |
Lijun Pan | 6be4666 | 2020-12-14 15:19:29 -0600 | [diff] [blame] | 2415 | adapter->reset_reason == VNIC_RESET_MOBILITY) |
| 2416 | __netdev_notify_peers(netdev); |
Nathan Fontenot | 61d3e1d | 2017-06-12 20:47:45 -0400 | [diff] [blame] | 2417 | |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2418 | rc = 0; |
| 2419 | |
| 2420 | out: |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2421 | /* restore the adapter state if reset failed */ |
| 2422 | if (rc) |
| 2423 | adapter->state = reset_state; |
Lijun Pan | 3f5ec37 | 2021-01-06 15:35:14 -0600 | [diff] [blame] | 2424 | /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */ |
| 2425 | if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) |
| 2426 | rtnl_unlock(); |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2427 | |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 2428 | netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", |
| 2429 | adapter_state_to_string(adapter->state), |
| 2430 | adapter->failover_pending, rc); |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2431 | return rc; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2432 | } |
| 2433 | |
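/* Note that do_reset() re-reads the adapter state after taking the
 * RTNL lock (and again after any point where the lock was dropped),
 * because the state captured when the reset was queued may be stale by
 * the time the worker runs; on failure the saved reset_state is
 * restored so the next reset attempt starts from a known state.
 */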
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2434 | static int do_hard_reset(struct ibmvnic_adapter *adapter, |
| 2435 | struct ibmvnic_rwi *rwi, u32 reset_state) |
| 2436 | { |
| 2437 | struct net_device *netdev = adapter->netdev; |
| 2438 | int rc; |
| 2439 | |
Lijun Pan | caee7bf | 2021-04-12 02:41:27 -0500 | [diff] [blame] | 2440 | netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", |
| 2441 | reset_reason_to_string(rwi->reset_reason)); |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2442 | |
Sukadev Bhattiprolu | 8f1c0fd | 2021-02-23 21:02:29 -0800 | [diff] [blame] | 2443 | /* read the state and check (again) after getting rtnl */ |
| 2444 | reset_state = adapter->state; |
| 2445 | |
| 2446 | if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) { |
| 2447 | rc = -EBUSY; |
| 2448 | goto out; |
| 2449 | } |
| 2450 | |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2451 | netif_carrier_off(netdev); |
| 2452 | adapter->reset_reason = rwi->reset_reason; |
| 2453 | |
| 2454 | ibmvnic_cleanup(netdev); |
| 2455 | release_resources(adapter); |
| 2456 | release_sub_crqs(adapter, 0); |
| 2457 | release_crq_queue(adapter); |
| 2458 | |
| 2459 | /* remove the closed state so that when we call open it appears |
| 2460 | * we are coming from the probed state. |
| 2461 | */ |
| 2462 | adapter->state = VNIC_PROBED; |
| 2463 | |
Thomas Falcon | bbd669a | 2019-04-04 18:58:26 -0500 | [diff] [blame] | 2464 | reinit_completion(&adapter->init_done); |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2465 | rc = init_crq_queue(adapter); |
| 2466 | if (rc) { |
| 2467 | netdev_err(adapter->netdev, |
| 2468 | "Couldn't initialize crq. rc=%d\n", rc); |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2469 | goto out; |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2470 | } |
| 2471 | |
Lijun Pan | 635e442 | 2020-08-19 17:52:26 -0500 | [diff] [blame] | 2472 | rc = ibmvnic_reset_init(adapter, false); |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2473 | if (rc) |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2474 | goto out; |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2475 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 2476 | /* If the adapter was in PROBE or DOWN state prior to the reset, |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2477 | * exit here. |
| 2478 | */ |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 2479 | if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2480 | goto out; |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2481 | |
| 2482 | rc = ibmvnic_login(netdev); |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2483 | if (rc) |
| 2484 | goto out; |
Juliet Kim | a5681e2 | 2018-11-19 15:59:22 -0600 | [diff] [blame] | 2485 | |
| 2486 | rc = init_resources(adapter); |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2487 | if (rc) |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2488 | goto out; |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2489 | |
| 2490 | ibmvnic_disable_irqs(adapter); |
| 2491 | adapter->state = VNIC_CLOSED; |
| 2492 | |
| 2493 | if (reset_state == VNIC_CLOSED) |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2494 | goto out; |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2495 | |
| 2496 | rc = __ibmvnic_open(netdev); |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2497 | if (rc) { |
| 2498 | rc = IBMVNIC_OPEN_FAILED; |
| 2499 | goto out; |
| 2500 | } |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2501 | |
Lijun Pan | 6be4666 | 2020-12-14 15:19:29 -0600 | [diff] [blame] | 2502 | __netdev_notify_peers(netdev); |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2503 | out: |
| 2504 | /* restore adapter state if reset failed */ |
| 2505 | if (rc) |
| 2506 | adapter->state = reset_state; |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 2507 | netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", |
| 2508 | adapter_state_to_string(adapter->state), |
| 2509 | adapter->failover_pending, rc); |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2510 | return rc; |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2511 | } |
| 2512 | |
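/* Unlike do_reset(), which keeps its resources when the queue geometry
 * is unchanged, do_hard_reset() unconditionally releases all resources
 * and re-creates the CRQ from scratch; it is the recovery path used
 * after transport events that invalidate the existing queues.
 */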
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2513 | static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) |
| 2514 | { |
| 2515 | struct ibmvnic_rwi *rwi; |
Thomas Falcon | 6c5c748 | 2018-12-10 15:22:22 -0600 | [diff] [blame] | 2516 | unsigned long flags; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2517 | |
Thomas Falcon | 6c5c748 | 2018-12-10 15:22:22 -0600 | [diff] [blame] | 2518 | spin_lock_irqsave(&adapter->rwi_lock, flags); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2519 | |
| 2520 | if (!list_empty(&adapter->rwi_list)) { |
| 2521 | rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, |
| 2522 | list); |
| 2523 | list_del(&rwi->list); |
| 2524 | } else { |
| 2525 | rwi = NULL; |
| 2526 | } |
| 2527 | |
Thomas Falcon | 6c5c748 | 2018-12-10 15:22:22 -0600 | [diff] [blame] | 2528 | spin_unlock_irqrestore(&adapter->rwi_lock, flags); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2529 | return rwi; |
| 2530 | } |
| 2531 | |
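/* The reset worker below drains the queue with this helper, one rwi at
 * a time; a sketch of the usage pattern:
 *
 *	rwi = get_next_rwi(adapter);
 *	while (rwi) {
 *		(process and eventually free rwi)
 *		rwi = get_next_rwi(adapter);
 *	}
 */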
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 2532 | /** |
| 2533 | * do_passive_init - complete probing when partner device is detected. |
| 2534 | * @adapter: ibmvnic_adapter struct |
| 2535 | * |
| 2536 | * If the ibmvnic device does not have a partner device to communicate with at |
| 2537 | * boot and that partner device comes online at a later time, this function is |
| 2538 | * called to complete the initialization process of the ibmvnic device. |
| 2539 | * Caller is expected to hold rtnl_lock(). |
| 2540 | * |
| 2541 | * Returns non-zero if the sub-CRQs are not initialized properly, leaving the |
| 2542 | * device in the DOWN state. |
| 2543 | * Returns 0 upon success, leaving the device in the PROBED state. |
| 2544 | */ |
| 2546 | static int do_passive_init(struct ibmvnic_adapter *adapter) |
| 2547 | { |
| 2548 | unsigned long timeout = msecs_to_jiffies(30000); |
| 2549 | struct net_device *netdev = adapter->netdev; |
| 2550 | struct device *dev = &adapter->vdev->dev; |
| 2551 | int rc; |
| 2552 | |
| 2553 | netdev_dbg(netdev, "Partner device found, probing.\n"); |
| 2554 | |
| 2555 | adapter->state = VNIC_PROBING; |
| 2556 | reinit_completion(&adapter->init_done); |
| 2557 | adapter->init_done_rc = 0; |
| 2558 | adapter->crq.active = true; |
| 2559 | |
| 2560 | rc = send_crq_init_complete(adapter); |
| 2561 | if (rc) |
| 2562 | goto out; |
| 2563 | |
| 2564 | rc = send_version_xchg(adapter); |
| 2565 | if (rc) |
| 2566 | netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); |
| 2567 | |
| 2568 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
| 2569 | dev_err(dev, "Initialization sequence timed out\n"); |
| 2570 | rc = -ETIMEDOUT; |
| 2571 | goto out; |
| 2572 | } |
| 2573 | |
| 2574 | rc = init_sub_crqs(adapter); |
| 2575 | if (rc) { |
| 2576 | dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc); |
| 2577 | goto out; |
| 2578 | } |
| 2579 | |
| 2580 | rc = init_sub_crq_irqs(adapter); |
| 2581 | if (rc) { |
| 2582 | dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc); |
| 2583 | goto init_failed; |
| 2584 | } |
| 2585 | |
| 2586 | netdev->mtu = adapter->req_mtu - ETH_HLEN; |
| 2587 | netdev->min_mtu = adapter->min_mtu - ETH_HLEN; |
| 2588 | netdev->max_mtu = adapter->max_mtu - ETH_HLEN; |
| 2589 | |
| 2590 | adapter->state = VNIC_PROBED; |
| 2591 | netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n"); |
| 2592 | |
| 2593 | return 0; |
| 2594 | |
| 2595 | init_failed: |
| 2596 | release_sub_crqs(adapter, 1); |
| 2597 | out: |
| 2598 | adapter->state = VNIC_DOWN; |
| 2599 | return rc; |
| 2600 | } |
| 2601 | |
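/* As the kernel-doc above requires, callers must hold the RTNL lock;
 * the reset worker below invokes it as (sketch):
 *
 *	rtnl_lock();
 *	rc = do_passive_init(adapter);
 *	rtnl_unlock();
 *	if (!rc)
 *		netif_carrier_on(adapter->netdev);
 */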
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2602 | static void __ibmvnic_reset(struct work_struct *work) |
| 2603 | { |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2604 | struct ibmvnic_adapter *adapter; |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 2605 | bool saved_state = false; |
Sukadev Bhattiprolu | 4f408e1 | 2021-06-30 14:36:17 -0400 | [diff] [blame] | 2606 | struct ibmvnic_rwi *tmprwi; |
| 2607 | struct ibmvnic_rwi *rwi; |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 2608 | unsigned long flags; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2609 | u32 reset_state; |
Sukadev Bhattiprolu | db9f0e8 | 2022-01-21 18:59:18 -0800 | [diff] [blame] | 2610 | int num_fails = 0; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2611 | int rc = 0; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2612 | |
| 2613 | adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2614 | |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2615 | if (test_and_set_bit_lock(0, &adapter->resetting)) { |
Lijun Pan | 870e04a | 2021-04-13 14:33:39 -0500 | [diff] [blame] | 2616 | queue_delayed_work(system_long_wq, |
| 2617 | &adapter->ibmvnic_delayed_reset, |
| 2618 | IBMVNIC_RESET_DELAY); |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2619 | return; |
| 2620 | } |
| 2621 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2622 | rwi = get_next_rwi(adapter); |
| 2623 | while (rwi) { |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 2624 | spin_lock_irqsave(&adapter->state_lock, flags); |
| 2625 | |
Thomas Falcon | 36f1031 | 2019-08-27 11:10:04 -0500 | [diff] [blame] | 2626 | if (adapter->state == VNIC_REMOVING || |
Michal Suchanek | c8dc559 | 2019-09-09 22:44:51 +0200 | [diff] [blame] | 2627 | adapter->state == VNIC_REMOVED) { |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 2628 | spin_unlock_irqrestore(&adapter->state_lock, flags); |
Juliet Kim | 1c2977c | 2019-09-05 17:30:01 -0400 | [diff] [blame] | 2629 | kfree(rwi); |
| 2630 | rc = EBUSY; |
| 2631 | break; |
| 2632 | } |
Thomas Falcon | 36f1031 | 2019-08-27 11:10:04 -0500 | [diff] [blame] | 2633 | |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 2634 | if (!saved_state) { |
| 2635 | reset_state = adapter->state; |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 2636 | saved_state = true; |
| 2637 | } |
| 2638 | spin_unlock_irqrestore(&adapter->state_lock, flags); |
| 2639 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 2640 | if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { |
| 2641 | rtnl_lock(); |
| 2642 | rc = do_passive_init(adapter); |
| 2643 | rtnl_unlock(); |
| 2644 | if (!rc) |
| 2645 | netif_carrier_on(adapter->netdev); |
| 2646 | } else if (adapter->force_reset_recovery) { |
Lijun Pan | bab08be | 2021-02-11 00:43:19 -0600 | [diff] [blame] | 2647 | /* Since we are doing a hard reset now, clear the |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 2648 | * failover_pending flag so we don't ignore any |
| 2649 | * future MOBILITY or other resets. |
| 2650 | */ |
| 2651 | adapter->failover_pending = false; |
| 2652 | |
Juliet Kim | b27507b | 2019-09-20 16:11:22 -0400 | [diff] [blame] | 2653 | /* Transport event occurred during previous reset */ |
| 2654 | if (adapter->wait_for_reset) { |
| 2655 | /* Previous was CHANGE_PARAM; caller locked */ |
| 2656 | adapter->force_reset_recovery = false; |
| 2657 | rc = do_hard_reset(adapter, rwi, reset_state); |
| 2658 | } else { |
| 2659 | rtnl_lock(); |
| 2660 | adapter->force_reset_recovery = false; |
| 2661 | rc = do_hard_reset(adapter, rwi, reset_state); |
| 2662 | rtnl_unlock(); |
| 2663 | } |
Sukadev Bhattiprolu | db9f0e8 | 2022-01-21 18:59:18 -0800 | [diff] [blame] | 2664 | if (rc) |
| 2665 | num_fails++; |
| 2666 | else |
| 2667 | num_fails = 0; |
| 2668 | |
| 2669 | /* If auto-priority-failover is enabled, we can get |
| 2670 | * back-to-back failovers during resets, resulting |
| 2671 | * in at least two failed resets (from the high-priority |
| 2672 | * backing device to the low-priority one and back). |
| 2673 | * If resets continue to fail beyond that, give the |
| 2674 | * adapter some time to settle down before retrying. |
| 2675 | */ |
| 2676 | if (num_fails >= 3) { |
Sukadev Bhattiprolu | f15fde9 | 2020-11-25 18:04:28 -0600 | [diff] [blame] | 2677 | netdev_dbg(adapter->netdev, |
Sukadev Bhattiprolu | db9f0e8 | 2022-01-21 18:59:18 -0800 | [diff] [blame] | 2678 | "[S:%s] Hard reset failed %d times, waiting 60 secs\n", |
| 2679 | adapter_state_to_string(adapter->state), |
| 2680 | num_fails); |
Sukadev Bhattiprolu | f15fde9 | 2020-11-25 18:04:28 -0600 | [diff] [blame] | 2681 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 2682 | schedule_timeout(60 * HZ); |
| 2683 | } |
Lijun Pan | 1f45dc2 | 2020-12-23 14:49:04 -0600 | [diff] [blame] | 2684 | } else { |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2685 | rc = do_reset(adapter, rwi, reset_state); |
| 2686 | } |
Sukadev Bhattiprolu | 4f408e1 | 2021-06-30 14:36:17 -0400 | [diff] [blame] | 2687 | tmprwi = rwi; |
Dany Madden | a86d5c6 | 2020-11-25 18:04:31 -0600 | [diff] [blame] | 2688 | adapter->last_reset_time = jiffies; |
Dany Madden | 0cb4bc6 | 2020-11-25 18:04:27 -0600 | [diff] [blame] | 2689 | |
Dany Madden | 18f141b | 2020-11-25 18:04:25 -0600 | [diff] [blame] | 2690 | if (rc) |
| 2691 | netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2692 | |
| 2693 | rwi = get_next_rwi(adapter); |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2694 | |
Sukadev Bhattiprolu | 4f408e1 | 2021-06-30 14:36:17 -0400 | [diff] [blame] | 2695 | /* |
| 2696 | * If there is another reset queued, free the previous rwi |
| 2697 | * and process the new reset even if the previous reset failed |
| 2698 | * (the previous reset could have failed because of a failover, |
| 2699 | * for instance, so process the failover). |
| 2700 | * |
| 2701 | * If there are no resets queued and the previous reset failed, |
| 2702 | * the adapter would be left in an undefined state, so retry the |
| 2703 | * previous reset as a hard reset. |
| 2704 | */ |
| 2705 | if (rwi) |
| 2706 | kfree(tmprwi); |
| 2707 | else if (rc) |
| 2708 | rwi = tmprwi; |
| 2709 | |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2710 | if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || |
Sukadev Bhattiprolu | 4f408e1 | 2021-06-30 14:36:17 -0400 | [diff] [blame] | 2711 | rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2712 | adapter->force_reset_recovery = true; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2713 | } |
| 2714 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2715 | if (adapter->wait_for_reset) { |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2716 | adapter->reset_done_rc = rc; |
| 2717 | complete(&adapter->reset_done); |
| 2718 | } |
| 2719 | |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2720 | clear_bit_unlock(0, &adapter->resetting); |
Sukadev Bhattiprolu | 38bd5ce | 2020-12-04 18:22:35 -0800 | [diff] [blame] | 2721 | |
| 2722 | netdev_dbg(adapter->netdev, |
Lijun Pan | 0666ef7 | 2021-04-12 02:41:28 -0500 | [diff] [blame] | 2723 | "[S:%s FRR:%d WFR:%d] Done processing resets\n", |
| 2724 | adapter_state_to_string(adapter->state), |
| 2725 | adapter->force_reset_recovery, |
Sukadev Bhattiprolu | 38bd5ce | 2020-12-04 18:22:35 -0800 | [diff] [blame] | 2726 | adapter->wait_for_reset); |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2727 | } |
| 2728 | |
| 2729 | static void __ibmvnic_delayed_reset(struct work_struct *work) |
| 2730 | { |
| 2731 | struct ibmvnic_adapter *adapter; |
| 2732 | |
| 2733 | adapter = container_of(work, struct ibmvnic_adapter, |
| 2734 | ibmvnic_delayed_reset.work); |
| 2735 | __ibmvnic_reset(&adapter->ibmvnic_reset); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2736 | } |
| 2737 | |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2738 | static int ibmvnic_reset(struct ibmvnic_adapter *adapter, |
| 2739 | enum ibmvnic_reset_reason reason) |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2740 | { |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2741 | struct list_head *entry, *tmp_entry; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2742 | struct ibmvnic_rwi *rwi, *tmp; |
| 2743 | struct net_device *netdev = adapter->netdev; |
Thomas Falcon | 6c5c748 | 2018-12-10 15:22:22 -0600 | [diff] [blame] | 2744 | unsigned long flags; |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2745 | int ret; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2746 | |
Jakub Kicinski | b646acd5 | 2021-02-16 22:58:44 -0800 | [diff] [blame] | 2747 | spin_lock_irqsave(&adapter->rwi_lock, flags); |
| 2748 | |
| 2749 | /* If a failover is pending, don't schedule any other reset. |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 2750 | * Instead let the failover complete. If there is already a |
| 2751 | * failover reset scheduled, we will detect and drop the |
| 2752 | * duplicate reset when walking the ->rwi_list below. |
| 2753 | */ |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2754 | if (adapter->state == VNIC_REMOVING || |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 2755 | adapter->state == VNIC_REMOVED || |
Sukadev Bhattiprolu | 1d85049 | 2020-10-30 10:07:11 -0700 | [diff] [blame] | 2756 | (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2757 | ret = EBUSY; |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 2758 | netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2759 | goto err; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2760 | } |
| 2761 | |
Nathan Fontenot | 6a2fb0e | 2017-06-15 14:48:09 -0400 | [diff] [blame] | 2762 | if (adapter->state == VNIC_PROBING) { |
| 2763 | netdev_warn(netdev, "Adapter reset during probe\n"); |
Sukadev Bhattiprolu | 6b278c0 | 2021-10-29 15:03:16 -0700 | [diff] [blame] | 2764 | adapter->init_done_rc = -EAGAIN; |
Lijun Pan | 91dc5d2 | 2021-02-11 00:43:22 -0600 | [diff] [blame] | 2765 | ret = EAGAIN; |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2766 | goto err; |
Nathan Fontenot | 6a2fb0e | 2017-06-15 14:48:09 -0400 | [diff] [blame] | 2767 | } |
| 2768 | |
Wang Hai | 3e98ae0 | 2021-06-10 20:54:17 +0800 | [diff] [blame] | 2769 | list_for_each_entry(tmp, &adapter->rwi_list, list) { |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2770 | if (tmp->reset_reason == reason) { |
Lijun Pan | caee7bf | 2021-04-12 02:41:27 -0500 | [diff] [blame] | 2771 | netdev_dbg(netdev, "Skipping matching reset, reason=%s\n", |
| 2772 | reset_reason_to_string(reason)); |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2773 | ret = EBUSY; |
| 2774 | goto err; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2775 | } |
| 2776 | } |
| 2777 | |
Thomas Falcon | 1d1bbc3 | 2018-12-10 15:22:23 -0600 | [diff] [blame] | 2778 | rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2779 | if (!rwi) { |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2780 | ret = ENOMEM; |
| 2781 | goto err; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2782 | } |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 2783 | /* If we just received a transport event, |
| 2784 | * flush the reset queue and process this reset. |
| 2785 | */ |
| 2786 | if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { |
| 2787 | list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) |
| 2788 | list_del(entry); |
| 2789 | } |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2790 | rwi->reset_reason = reason; |
| 2791 | list_add_tail(&rwi->list, &adapter->rwi_list); |
Lijun Pan | caee7bf | 2021-04-12 02:41:27 -0500 | [diff] [blame] | 2792 | netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", |
| 2793 | reset_reason_to_string(reason)); |
Lijun Pan | 870e04a | 2021-04-13 14:33:39 -0500 | [diff] [blame] | 2794 | queue_work(system_long_wq, &adapter->ibmvnic_reset); |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2795 | |
Sukadev Bhattiprolu | 4a41c42 | 2021-02-12 20:42:50 -0800 | [diff] [blame] | 2796 | ret = 0; |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2797 | err: |
Sukadev Bhattiprolu | 4a41c42 | 2021-02-12 20:42:50 -0800 | [diff] [blame] | 2798 | /* ibmvnic_close() below can block, so drop the lock first */ |
| 2799 | spin_unlock_irqrestore(&adapter->rwi_lock, flags); |
| 2800 | |
| 2801 | if (ret == ENOMEM) |
| 2802 | ibmvnic_close(netdev); |
| 2803 | |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2804 | return -ret; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2805 | } |
| 2806 | |
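/* ibmvnic_reset() may be called from hard-irq context (for example on
 * a transport event signalled through the CRQ interrupt), which is why
 * the rwi is allocated with GFP_ATOMIC and rwi_lock is taken with
 * spin_lock_irqsave(). A typical call site is simply:
 *
 *	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
 *
 * with the actual work deferred to the system_long_wq worker.
 */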
Michael S. Tsirkin | 0290bd2 | 2019-12-10 09:23:51 -0500 | [diff] [blame] | 2807 | static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2808 | { |
| 2809 | struct ibmvnic_adapter *adapter = netdev_priv(dev); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2810 | |
Lijun Pan | 855a631 | 2020-11-20 16:40:13 -0600 | [diff] [blame] | 2811 | if (test_bit(0, &adapter->resetting)) { |
| 2812 | netdev_err(adapter->netdev, |
| 2813 | "Adapter is resetting, skip timeout reset\n"); |
| 2814 | return; |
| 2815 | } |
Dany Madden | a86d5c6 | 2020-11-25 18:04:31 -0600 | [diff] [blame] | 2816 | /* Do not queue up a reset until at least 5 seconds (the default |
| 2817 | * watchdog value) have passed since the last reset. |
| 2818 | */ |
| 2819 | if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { |
| 2820 | netdev_dbg(dev, "Not yet time to tx timeout.\n"); |
| 2821 | return; |
| 2822 | } |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 2823 | ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2824 | } |
| 2825 | |
| 2826 | static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, |
| 2827 | struct ibmvnic_rx_buff *rx_buff) |
| 2828 | { |
| 2829 | struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; |
| 2830 | |
| 2831 | rx_buff->skb = NULL; |
| 2832 | |
| 2833 | pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); |
| 2834 | pool->next_alloc = (pool->next_alloc + 1) % pool->size; |
| 2835 | |
| 2836 | atomic_dec(&pool->available); |
| 2837 | } |
| 2838 | |
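/* remove_buff_from_pool() is the "produce" half of the rx pool's
 * free_map ring: it records the returned slot at next_alloc and
 * advances that index modulo the pool size. The replenish path
 * elsewhere in this file consumes from the other end in the same
 * modular fashion, roughly (a sketch):
 *
 *	index = pool->free_map[pool->next_free];
 *	pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 *	pool->next_free = (pool->next_free + 1) % pool->size;
 *
 * so free_map behaves as a fixed-size circular queue of free buffer
 * indices, mirroring the tx pool's consumer_index handling in the
 * transmit path above.
 */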
| 2839 | static int ibmvnic_poll(struct napi_struct *napi, int budget) |
| 2840 | { |
Dwip N. Banerjee | ec20f36 | 2020-11-18 19:12:23 -0600 | [diff] [blame] | 2841 | struct ibmvnic_sub_crq_queue *rx_scrq; |
| 2842 | struct ibmvnic_adapter *adapter; |
| 2843 | struct net_device *netdev; |
| 2844 | int frames_processed; |
| 2845 | int scrq_num; |
| 2846 | |
| 2847 | netdev = napi->dev; |
| 2848 | adapter = netdev_priv(netdev); |
| 2849 | scrq_num = (int)(napi - adapter->napi); |
| 2850 | frames_processed = 0; |
| 2851 | rx_scrq = adapter->rx_scrq[scrq_num]; |
Nathan Fontenot | 152ce47 | 2017-05-26 10:30:54 -0400 | [diff] [blame] | 2852 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2853 | restart_poll: |
| 2854 | while (frames_processed < budget) { |
| 2855 | struct sk_buff *skb; |
| 2856 | struct ibmvnic_rx_buff *rx_buff; |
| 2857 | union sub_crq *next; |
| 2858 | u32 length; |
| 2859 | u16 offset; |
| 2860 | u8 flags = 0; |
| 2861 | |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 2862 | if (unlikely(test_bit(0, &adapter->resetting) && |
John Allen | 3468656 | 2018-02-06 16:21:49 -0600 | [diff] [blame] | 2863 | adapter->reset_reason != VNIC_RESET_NON_FATAL)) { |
Dwip N. Banerjee | ec20f36 | 2020-11-18 19:12:23 -0600 | [diff] [blame] | 2864 | enable_scrq_irq(adapter, rx_scrq); |
Thomas Falcon | 21ecba6 | 2017-06-14 23:50:09 -0500 | [diff] [blame] | 2865 | napi_complete_done(napi, frames_processed); |
| 2866 | return frames_processed; |
| 2867 | } |
| 2868 | |
Dwip N. Banerjee | ec20f36 | 2020-11-18 19:12:23 -0600 | [diff] [blame] | 2869 | if (!pending_scrq(adapter, rx_scrq)) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2870 | break; |
Dwip N. Banerjee | ec20f36 | 2020-11-18 19:12:23 -0600 | [diff] [blame] | 2871 | next = ibmvnic_next_scrq(adapter, rx_scrq); |
Lijun Pan | 914789a | 2021-02-11 00:43:21 -0600 | [diff] [blame] | 2872 | rx_buff = (struct ibmvnic_rx_buff *) |
| 2873 | be64_to_cpu(next->rx_comp.correlator); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2874 | /* do error checking */ |
| 2875 | if (next->rx_comp.rc) { |
John Allen | e1cea2e | 2017-08-07 15:42:30 -0500 | [diff] [blame] | 2876 | netdev_dbg(netdev, "rx buffer returned with rc %x\n", |
| 2877 | be16_to_cpu(next->rx_comp.rc)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2878 | /* free the entry */ |
| 2879 | next->rx_comp.first = 0; |
Thomas Falcon | 4b9b0f0 | 2018-02-13 18:23:42 -0600 | [diff] [blame] | 2880 | dev_kfree_skb_any(rx_buff->skb); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2881 | remove_buff_from_pool(adapter, rx_buff); |
Nathan Fontenot | ca05e31 | 2017-05-03 14:05:14 -0400 | [diff] [blame] | 2882 | continue; |
Thomas Falcon | abe27a8 | 2018-02-19 20:12:57 -0600 | [diff] [blame] | 2883 | } else if (!rx_buff->skb) { |
| 2884 | /* free the entry */ |
| 2885 | next->rx_comp.first = 0; |
| 2886 | remove_buff_from_pool(adapter, rx_buff); |
| 2887 | continue; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2888 | } |
| 2889 | |
| 2890 | length = be32_to_cpu(next->rx_comp.len); |
| 2891 | offset = be16_to_cpu(next->rx_comp.off_frame_data); |
| 2892 | flags = next->rx_comp.flags; |
| 2893 | skb = rx_buff->skb; |
Lijun Pan | 42557da | 2021-02-12 20:48:40 -0600 | [diff] [blame] | 2894 | /* load long_term_buff before copying to skb */ |
| 2895 | dma_rmb(); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2896 | skb_copy_to_linear_data(skb, rx_buff->data + offset, |
| 2897 | length); |
Murilo Fossa Vicentini | 6052d5e | 2017-04-21 15:38:46 -0400 | [diff] [blame] | 2898 | |
| 2899 | /* The VLAN header has been stripped by the system firmware and |
| 2900 | * needs to be inserted by the driver. |
| 2901 | */ |
| 2902 | if (adapter->rx_vlan_header_insertion && |
| 2903 | (flags & IBMVNIC_VLAN_STRIPPED)) |
| 2904 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
| 2905 | ntohs(next->rx_comp.vlan_tci)); |
| 2906 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2907 | /* free the entry */ |
| 2908 | next->rx_comp.first = 0; |
| 2909 | remove_buff_from_pool(adapter, rx_buff); |
| 2910 | |
| 2911 | skb_put(skb, length); |
| 2912 | skb->protocol = eth_type_trans(skb, netdev); |
Thomas Falcon | 94ca305 | 2017-05-03 14:05:20 -0400 | [diff] [blame] | 2913 | skb_record_rx_queue(skb, scrq_num); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2914 | |
| 2915 | if (flags & IBMVNIC_IP_CHKSUM_GOOD && |
| 2916 | flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { |
| 2917 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 2918 | } |
| 2919 | |
| 2920 | length = skb->len; |
| 2921 | napi_gro_receive(napi, skb); /* send it up */ |
| 2922 | netdev->stats.rx_packets++; |
| 2923 | netdev->stats.rx_bytes += length; |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 2924 | adapter->rx_stats_buffers[scrq_num].packets++; |
| 2925 | adapter->rx_stats_buffers[scrq_num].bytes += length; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2926 | frames_processed++; |
| 2927 | } |
Nathan Fontenot | 152ce47 | 2017-05-26 10:30:54 -0400 | [diff] [blame] | 2928 | |
Dwip N. Banerjee | 41ed0a0 | 2020-11-18 19:12:25 -0600 | [diff] [blame] | 2929 | if (adapter->state != VNIC_CLOSING && |
| 2930 | ((atomic_read(&adapter->rx_pool[scrq_num].available) < |
| 2931 | adapter->req_rx_add_entries_per_subcrq / 2) || |
| 2932 | frames_processed < budget)) |
Nathan Fontenot | 152ce47 | 2017-05-26 10:30:54 -0400 | [diff] [blame] | 2933 | replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2934 | if (frames_processed < budget) { |
Dwip N. Banerjee | ec20f36 | 2020-11-18 19:12:23 -0600 | [diff] [blame] | 2935 | if (napi_complete_done(napi, frames_processed)) { |
| 2936 | enable_scrq_irq(adapter, rx_scrq); |
| 2937 | if (pending_scrq(adapter, rx_scrq)) { |
Dwip N. Banerjee | ec20f36 | 2020-11-18 19:12:23 -0600 | [diff] [blame] | 2938 | if (napi_reschedule(napi)) { |
| 2939 | disable_scrq_irq(adapter, rx_scrq); |
| 2940 | goto restart_poll; |
| 2941 | } |
| 2942 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 2943 | } |
| 2944 | } |
| 2945 | return frames_processed; |
| 2946 | } |
| 2947 | |
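/* Snapshot the current settings as a fallback, request a CHANGE_PARAM reset
 * and wait up to 60 seconds for it to complete. If the reset completes with
 * an error, the fallback values are restored and a second CHANGE_PARAM
 * reset is attempted with them before giving up.
 */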
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2948 | static int wait_for_reset(struct ibmvnic_adapter *adapter) |
| 2949 | { |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2950 | int rc, ret; |
| 2951 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2952 | adapter->fallback.mtu = adapter->req_mtu; |
| 2953 | adapter->fallback.rx_queues = adapter->req_rx_queues; |
| 2954 | adapter->fallback.tx_queues = adapter->req_tx_queues; |
| 2955 | adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; |
| 2956 | adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; |
| 2957 | |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 2958 | reinit_completion(&adapter->reset_done); |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2959 | adapter->wait_for_reset = true; |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2960 | rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 2961 | |
| 2962 | if (rc) { |
| 2963 | ret = rc; |
| 2964 | goto out; |
| 2965 | } |
| 2966 | rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); |
| 2967 | if (rc) { |
| 2968 | ret = -ENODEV; |
| 2969 | goto out; |
| 2970 | } |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2971 | |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2972 | ret = 0; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2973 | if (adapter->reset_done_rc) { |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2974 | ret = -EIO; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2975 | adapter->desired.mtu = adapter->fallback.mtu; |
| 2976 | adapter->desired.rx_queues = adapter->fallback.rx_queues; |
| 2977 | adapter->desired.tx_queues = adapter->fallback.tx_queues; |
| 2978 | adapter->desired.rx_entries = adapter->fallback.rx_entries; |
| 2979 | adapter->desired.tx_entries = adapter->fallback.tx_entries; |
| 2980 | |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 2981 | reinit_completion(&adapter->reset_done); |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2982 | adapter->wait_for_reset = true; |
| 2983 | rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 2984 | if (rc) { |
| 2985 | ret = rc; |
| 2986 | goto out; |
| 2987 | } |
| 2988 | rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, |
| 2989 | 60000); |
| 2990 | if (rc) { |
| 2991 | ret = -ENODEV; |
| 2992 | goto out; |
| 2993 | } |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2994 | } |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 2995 | out: |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2996 | adapter->wait_for_reset = false; |
| 2997 | |
Thomas Falcon | af894d2 | 2018-04-06 18:37:04 -0500 | [diff] [blame] | 2998 | return ret; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 2999 | } |
| 3000 | |
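/* MTU changes are not applied on the fly. The new value (plus ETH_HLEN,
 * since the requested MTU covers the Ethernet header) is recorded in
 * adapter->desired.mtu and applied through the CHANGE_PARAM reset driven
 * by wait_for_reset().
 */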
John Allen | 3a807b7 | 2017-06-06 16:55:52 -0500 | [diff] [blame] | 3001 | static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) |
| 3002 | { |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3003 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3004 | |
| 3005 | adapter->desired.mtu = new_mtu + ETH_HLEN; |
| 3006 | |
| 3007 | return wait_for_reset(adapter); |
John Allen | 3a807b7 | 2017-06-06 16:55:52 -0500 | [diff] [blame] | 3008 | } |
| 3009 | |
Thomas Falcon | f10b09e | 2018-03-12 11:51:05 -0500 | [diff] [blame] | 3010 | static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, |
| 3011 | struct net_device *dev, |
| 3012 | netdev_features_t features) |
| 3013 | { |
| 3014 | /* Some backing hardware adapters cannot |
| 3015 | * handle packets with an MSS less than 224 |
| 3016 | * or with only one segment. |
| 3017 | */ |
| 3018 | if (skb_is_gso(skb)) { |
| 3019 | if (skb_shinfo(skb)->gso_size < 224 || |
| 3020 | skb_shinfo(skb)->gso_segs == 1) |
| 3021 | features &= ~NETIF_F_GSO_MASK; |
| 3022 | } |
| 3023 | |
| 3024 | return features; |
| 3025 | } |
| 3026 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3027 | static const struct net_device_ops ibmvnic_netdev_ops = { |
| 3028 | .ndo_open = ibmvnic_open, |
| 3029 | .ndo_stop = ibmvnic_close, |
| 3030 | .ndo_start_xmit = ibmvnic_xmit, |
| 3031 | .ndo_set_rx_mode = ibmvnic_set_multi, |
| 3032 | .ndo_set_mac_address = ibmvnic_set_mac, |
| 3033 | .ndo_validate_addr = eth_validate_addr, |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3034 | .ndo_tx_timeout = ibmvnic_tx_timeout, |
John Allen | 3a807b7 | 2017-06-06 16:55:52 -0500 | [diff] [blame] | 3035 | .ndo_change_mtu = ibmvnic_change_mtu, |
Thomas Falcon | f10b09e | 2018-03-12 11:51:05 -0500 | [diff] [blame] | 3036 | .ndo_features_check = ibmvnic_features_check, |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3037 | }; |
| 3038 | |
| 3039 | /* ethtool functions */ |
| 3040 | |
Philippe Reynes | 8a43379 | 2017-01-07 22:37:29 +0100 | [diff] [blame] | 3041 | static int ibmvnic_get_link_ksettings(struct net_device *netdev, |
| 3042 | struct ethtool_link_ksettings *cmd) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3043 | { |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 3044 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3045 | int rc; |
Philippe Reynes | 8a43379 | 2017-01-07 22:37:29 +0100 | [diff] [blame] | 3046 | |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 3047 | rc = send_query_phys_parms(adapter); |
| 3048 | if (rc) { |
| 3049 | adapter->speed = SPEED_UNKNOWN; |
| 3050 | adapter->duplex = DUPLEX_UNKNOWN; |
| 3051 | } |
| 3052 | cmd->base.speed = adapter->speed; |
| 3053 | cmd->base.duplex = adapter->duplex; |
Philippe Reynes | 8a43379 | 2017-01-07 22:37:29 +0100 | [diff] [blame] | 3054 | cmd->base.port = PORT_FIBRE; |
| 3055 | cmd->base.phy_address = 0; |
| 3056 | cmd->base.autoneg = AUTONEG_ENABLE; |
| 3057 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3058 | return 0; |
| 3059 | } |
| 3060 | |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 3061 | static void ibmvnic_get_drvinfo(struct net_device *netdev, |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3062 | struct ethtool_drvinfo *info) |
| 3063 | { |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 3064 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3065 | |
Lijun Pan | 8a96c80 | 2021-02-11 00:43:25 -0600 | [diff] [blame] | 3066 | strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); |
| 3067 | strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); |
| 3068 | strscpy(info->fw_version, adapter->fw_version, |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 3069 | sizeof(info->fw_version)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3070 | } |
| 3071 | |
| 3072 | static u32 ibmvnic_get_msglevel(struct net_device *netdev) |
| 3073 | { |
| 3074 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3075 | |
| 3076 | return adapter->msg_enable; |
| 3077 | } |
| 3078 | |
| 3079 | static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data) |
| 3080 | { |
| 3081 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3082 | |
| 3083 | adapter->msg_enable = data; |
| 3084 | } |
| 3085 | |
| 3086 | static u32 ibmvnic_get_link(struct net_device *netdev) |
| 3087 | { |
| 3088 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3089 | |
| 3090 | /* No need to send a query: we request a logical link up at init and |
| 3091 | * then wait for link state indications. |
| 3092 | */ |
| 3093 | return adapter->logical_link_state; |
| 3094 | } |
| 3095 | |
| 3096 | static void ibmvnic_get_ringparam(struct net_device *netdev, |
Hao Chen | 7462494 | 2021-11-18 20:12:43 +0800 | [diff] [blame] | 3097 | struct ethtool_ringparam *ring, |
| 3098 | struct kernel_ethtool_ringparam *kernel_ring, |
| 3099 | struct netlink_ext_ack *extack) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3100 | { |
John Allen | bc131b3 | 2017-08-02 16:46:30 -0500 | [diff] [blame] | 3101 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3102 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3103 | if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { |
| 3104 | ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; |
| 3105 | ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; |
| 3106 | } else { |
| 3107 | ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; |
| 3108 | ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; |
| 3109 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3110 | ring->rx_mini_max_pending = 0; |
| 3111 | ring->rx_jumbo_max_pending = 0; |
John Allen | bc131b3 | 2017-08-02 16:46:30 -0500 | [diff] [blame] | 3112 | ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; |
| 3113 | ring->tx_pending = adapter->req_tx_entries_per_subcrq; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3114 | ring->rx_mini_pending = 0; |
| 3115 | ring->rx_jumbo_pending = 0; |
| 3116 | } |
| 3117 | |
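/* Ring size changes also go through a CHANGE_PARAM reset. The server may
 * grant fewer entries than requested, in which case the granted values are
 * reported via the netdev_info() message below.
 */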
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3118 | static int ibmvnic_set_ringparam(struct net_device *netdev, |
Hao Chen | 7462494 | 2021-11-18 20:12:43 +0800 | [diff] [blame] | 3119 | struct ethtool_ringparam *ring, |
| 3120 | struct kernel_ethtool_ringparam *kernel_ring, |
| 3121 | struct netlink_ext_ack *extack) |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3122 | { |
| 3123 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3124 | int ret; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3125 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3126 | ret = 0; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3127 | adapter->desired.rx_entries = ring->rx_pending; |
| 3128 | adapter->desired.tx_entries = ring->tx_pending; |
| 3129 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3130 | ret = wait_for_reset(adapter); |
| 3131 | |
| 3132 | if (!ret && |
| 3133 | (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || |
| 3134 | adapter->req_tx_entries_per_subcrq != ring->tx_pending)) |
| 3135 | netdev_info(netdev, |
| 3136 | "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", |
| 3137 | ring->rx_pending, ring->tx_pending, |
| 3138 | adapter->req_rx_add_entries_per_subcrq, |
| 3139 | adapter->req_tx_entries_per_subcrq); |
| 3140 | return ret; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3141 | } |
| 3142 | |
John Allen | c2dbeb6 | 2017-08-02 16:47:17 -0500 | [diff] [blame] | 3143 | static void ibmvnic_get_channels(struct net_device *netdev, |
| 3144 | struct ethtool_channels *channels) |
| 3145 | { |
| 3146 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3147 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3148 | if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { |
| 3149 | channels->max_rx = adapter->max_rx_queues; |
| 3150 | channels->max_tx = adapter->max_tx_queues; |
| 3151 | } else { |
| 3152 | channels->max_rx = IBMVNIC_MAX_QUEUES; |
| 3153 | channels->max_tx = IBMVNIC_MAX_QUEUES; |
| 3154 | } |
| 3155 | |
John Allen | c2dbeb6 | 2017-08-02 16:47:17 -0500 | [diff] [blame] | 3156 | channels->max_other = 0; |
| 3157 | channels->max_combined = 0; |
| 3158 | channels->rx_count = adapter->req_rx_queues; |
| 3159 | channels->tx_count = adapter->req_tx_queues; |
| 3160 | channels->other_count = 0; |
| 3161 | channels->combined_count = 0; |
| 3162 | } |
| 3163 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3164 | static int ibmvnic_set_channels(struct net_device *netdev, |
| 3165 | struct ethtool_channels *channels) |
| 3166 | { |
| 3167 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3168 | int ret; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3169 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3170 | ret = 0; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3171 | adapter->desired.rx_queues = channels->rx_count; |
| 3172 | adapter->desired.tx_queues = channels->tx_count; |
| 3173 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3174 | ret = wait_for_reset(adapter); |
| 3175 | |
| 3176 | if (!ret && |
| 3177 | (adapter->req_rx_queues != channels->rx_count || |
| 3178 | adapter->req_tx_queues != channels->tx_count)) |
| 3179 | netdev_info(netdev, |
| 3180 | "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", |
| 3181 | channels->rx_count, channels->tx_count, |
| 3182 | adapter->req_rx_queues, adapter->req_tx_queues); |
| 3183 | return ret; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3184 | } |
| 3185 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3186 | static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
| 3187 | { |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3188 | struct ibmvnic_adapter *adapter = netdev_priv(dev); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3189 | int i; |
| 3190 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3191 | switch (stringset) { |
| 3192 | case ETH_SS_STATS: |
| 3193 | for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); |
| 3194 | i++, data += ETH_GSTRING_LEN) |
| 3195 | memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); |
| 3196 | |
| 3197 | for (i = 0; i < adapter->req_tx_queues; i++) { |
| 3198 | snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); |
| 3199 | data += ETH_GSTRING_LEN; |
| 3200 | |
| 3201 | snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); |
| 3202 | data += ETH_GSTRING_LEN; |
| 3203 | |
| 3204 | snprintf(data, ETH_GSTRING_LEN, |
| 3205 | "tx%d_dropped_packets", i); |
| 3206 | data += ETH_GSTRING_LEN; |
| 3207 | } |
| 3208 | |
| 3209 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 3210 | snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); |
| 3211 | data += ETH_GSTRING_LEN; |
| 3212 | |
| 3213 | snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); |
| 3214 | data += ETH_GSTRING_LEN; |
| 3215 | |
| 3216 | snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); |
| 3217 | data += ETH_GSTRING_LEN; |
| 3218 | } |
| 3219 | break; |
| 3220 | |
| 3221 | case ETH_SS_PRIV_FLAGS: |
| 3222 | for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) |
| 3223 | strcpy(data + i * ETH_GSTRING_LEN, |
| 3224 | ibmvnic_priv_flags[i]); |
| 3225 | break; |
| 3226 | default: |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3227 | return; |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3228 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3229 | } |
| 3230 | |
| 3231 | static int ibmvnic_get_sset_count(struct net_device *dev, int sset) |
| 3232 | { |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3233 | struct ibmvnic_adapter *adapter = netdev_priv(dev); |
| 3234 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3235 | switch (sset) { |
| 3236 | case ETH_SS_STATS: |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3237 | return ARRAY_SIZE(ibmvnic_stats) + |
| 3238 | adapter->req_tx_queues * NUM_TX_STATS + |
| 3239 | adapter->req_rx_queues * NUM_RX_STATS; |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3240 | case ETH_SS_PRIV_FLAGS: |
| 3241 | return ARRAY_SIZE(ibmvnic_priv_flags); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3242 | default: |
| 3243 | return -EOPNOTSUPP; |
| 3244 | } |
| 3245 | } |
| 3246 | |
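/* Fetch statistics by sending a REQUEST_STATISTICS CRQ that asks the server
 * to fill the buffer mapped at adapter->stats_token, then wait up to 10
 * seconds for the completion before copying the device-wide and per-queue
 * counters into the ethtool data array.
 */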
| 3247 | static void ibmvnic_get_ethtool_stats(struct net_device *dev, |
| 3248 | struct ethtool_stats *stats, u64 *data) |
| 3249 | { |
| 3250 | struct ibmvnic_adapter *adapter = netdev_priv(dev); |
| 3251 | union ibmvnic_crq crq; |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3252 | int i, j; |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 3253 | int rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3254 | |
| 3255 | memset(&crq, 0, sizeof(crq)); |
| 3256 | crq.request_statistics.first = IBMVNIC_CRQ_CMD; |
| 3257 | crq.request_statistics.cmd = REQUEST_STATISTICS; |
| 3258 | crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); |
| 3259 | crq.request_statistics.len = |
| 3260 | cpu_to_be32(sizeof(struct ibmvnic_statistics)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3261 | |
| 3262 | /* Wait for data to be written */ |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 3263 | reinit_completion(&adapter->stats_done); |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 3264 | rc = ibmvnic_send_crq(adapter, &crq); |
| 3265 | if (rc) |
| 3266 | return; |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 3267 | rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); |
| 3268 | if (rc) |
| 3269 | return; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3270 | |
| 3271 | for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) |
Lijun Pan | 91dc5d2 | 2021-02-11 00:43:22 -0600 | [diff] [blame] | 3272 | data[i] = be64_to_cpu(IBMVNIC_GET_STAT |
| 3273 | (adapter, ibmvnic_stats[i].offset)); |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3274 | |
| 3275 | for (j = 0; j < adapter->req_tx_queues; j++) { |
| 3276 | data[i] = adapter->tx_stats_buffers[j].packets; |
| 3277 | i++; |
| 3278 | data[i] = adapter->tx_stats_buffers[j].bytes; |
| 3279 | i++; |
| 3280 | data[i] = adapter->tx_stats_buffers[j].dropped_packets; |
| 3281 | i++; |
| 3282 | } |
| 3283 | |
| 3284 | for (j = 0; j < adapter->req_rx_queues; j++) { |
| 3285 | data[i] = adapter->rx_stats_buffers[j].packets; |
| 3286 | i++; |
| 3287 | data[i] = adapter->rx_stats_buffers[j].bytes; |
| 3288 | i++; |
| 3289 | data[i] = adapter->rx_stats_buffers[j].interrupts; |
| 3290 | i++; |
| 3291 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3292 | } |
| 3293 | |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3294 | static u32 ibmvnic_get_priv_flags(struct net_device *netdev) |
| 3295 | { |
| 3296 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3297 | |
| 3298 | return adapter->priv_flags; |
| 3299 | } |
| 3300 | |
| 3301 | static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) |
| 3302 | { |
| 3303 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3304 | bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); |
| 3305 | |
| 3306 | if (which_maxes) |
| 3307 | adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; |
| 3308 | else |
| 3309 | adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; |
| 3310 | |
| 3311 | return 0; |
| 3312 | } |
Lijun Pan | 91dc5d2 | 2021-02-11 00:43:22 -0600 | [diff] [blame] | 3313 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3314 | static const struct ethtool_ops ibmvnic_ethtool_ops = { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3315 | .get_drvinfo = ibmvnic_get_drvinfo, |
| 3316 | .get_msglevel = ibmvnic_get_msglevel, |
| 3317 | .set_msglevel = ibmvnic_set_msglevel, |
| 3318 | .get_link = ibmvnic_get_link, |
| 3319 | .get_ringparam = ibmvnic_get_ringparam, |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3320 | .set_ringparam = ibmvnic_set_ringparam, |
John Allen | c2dbeb6 | 2017-08-02 16:47:17 -0500 | [diff] [blame] | 3321 | .get_channels = ibmvnic_get_channels, |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3322 | .set_channels = ibmvnic_set_channels, |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3323 | .get_strings = ibmvnic_get_strings, |
| 3324 | .get_sset_count = ibmvnic_get_sset_count, |
| 3325 | .get_ethtool_stats = ibmvnic_get_ethtool_stats, |
Philippe Reynes | 8a43379 | 2017-01-07 22:37:29 +0100 | [diff] [blame] | 3326 | .get_link_ksettings = ibmvnic_get_link_ksettings, |
Thomas Falcon | 723ad91 | 2018-09-28 18:38:26 -0500 | [diff] [blame] | 3327 | .get_priv_flags = ibmvnic_get_priv_flags, |
| 3328 | .set_priv_flags = ibmvnic_set_priv_flags, |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3329 | }; |
| 3330 | |
| 3331 | /* Routines for managing CRQs/sCRQs */ |
| 3332 | |
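/* Resetting a sub-CRQ releases its Linux interrupt, clears the 4-page
 * message queue in place and re-registers the same DMA-mapped area with
 * the hypervisor via h_reg_sub_crq().
 */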
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3333 | static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, |
| 3334 | struct ibmvnic_sub_crq_queue *scrq) |
| 3335 | { |
| 3336 | int rc; |
| 3337 | |
Dany Madden | 9281cf2 | 2020-11-25 18:04:26 -0600 | [diff] [blame] | 3338 | if (!scrq) { |
YANG LI | 862aecb | 2020-12-30 15:23:14 +0800 | [diff] [blame] | 3339 | netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); |
Dany Madden | 9281cf2 | 2020-11-25 18:04:26 -0600 | [diff] [blame] | 3340 | return -EINVAL; |
| 3341 | } |
| 3342 | |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3343 | if (scrq->irq) { |
| 3344 | free_irq(scrq->irq, scrq); |
| 3345 | irq_dispose_mapping(scrq->irq); |
| 3346 | scrq->irq = 0; |
| 3347 | } |
| 3348 | |
Dany Madden | 9281cf2 | 2020-11-25 18:04:26 -0600 | [diff] [blame] | 3349 | if (scrq->msgs) { |
| 3350 | memset(scrq->msgs, 0, 4 * PAGE_SIZE); |
| 3351 | atomic_set(&scrq->used, 0); |
| 3352 | scrq->cur = 0; |
Jakub Kicinski | 55fd59b | 2020-12-03 15:42:13 -0800 | [diff] [blame] | 3353 | scrq->ind_buf.index = 0; |
Dany Madden | 9281cf2 | 2020-11-25 18:04:26 -0600 | [diff] [blame] | 3354 | } else { |
| 3355 | netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); |
| 3356 | return -EINVAL; |
| 3357 | } |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3358 | |
| 3359 | rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, |
| 3360 | 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); |
| 3361 | return rc; |
| 3362 | } |
| 3363 | |
| 3364 | static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) |
| 3365 | { |
| 3366 | int i, rc; |
| 3367 | |
Lijun Pan | a0faaa2 | 2020-11-23 13:35:45 -0600 | [diff] [blame] | 3368 | if (!adapter->tx_scrq || !adapter->rx_scrq) |
| 3369 | return -EINVAL; |
| 3370 | |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3371 | for (i = 0; i < adapter->req_tx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 3372 | netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3373 | rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); |
| 3374 | if (rc) |
| 3375 | return rc; |
| 3376 | } |
| 3377 | |
| 3378 | for (i = 0; i < adapter->req_rx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 3379 | netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3380 | rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); |
| 3381 | if (rc) |
| 3382 | return rc; |
| 3383 | } |
| 3384 | |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 3385 | return rc; |
| 3386 | } |
| 3387 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3388 | static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3389 | struct ibmvnic_sub_crq_queue *scrq, |
| 3390 | bool do_h_free) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3391 | { |
| 3392 | struct device *dev = &adapter->vdev->dev; |
| 3393 | long rc; |
| 3394 | |
| 3395 | netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); |
| 3396 | |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3397 | if (do_h_free) { |
| 3398 | /* Close the sub-crqs */ |
| 3399 | do { |
| 3400 | rc = plpar_hcall_norets(H_FREE_SUB_CRQ, |
| 3401 | adapter->vdev->unit_address, |
| 3402 | scrq->crq_num); |
| 3403 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3404 | |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3405 | if (rc) { |
| 3406 | netdev_err(adapter->netdev, |
| 3407 | "Failed to release sub-CRQ %16lx, rc = %ld\n", |
| 3408 | scrq->crq_num, rc); |
| 3409 | } |
Thomas Falcon | ffa7385 | 2017-04-19 13:44:29 -0400 | [diff] [blame] | 3410 | } |
| 3411 | |
Thomas Falcon | f019fb6 | 2020-11-18 19:12:17 -0600 | [diff] [blame] | 3412 | dma_free_coherent(dev, |
| 3413 | IBMVNIC_IND_ARR_SZ, |
| 3414 | scrq->ind_buf.indir_arr, |
| 3415 | scrq->ind_buf.indir_dma); |
| 3416 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3417 | dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, |
| 3418 | DMA_BIDIRECTIONAL); |
| 3419 | free_pages((unsigned long)scrq->msgs, 2); |
| 3420 | kfree(scrq); |
| 3421 | } |
| 3422 | |
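/* Allocate and register a single sub-CRQ: an order-2 (four page) message
 * area is DMA-mapped and registered with the hypervisor, and a coherent
 * indirect descriptor buffer is allocated for batched (indirect) descriptor
 * submission to the server.
 */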
| 3423 | static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter |
| 3424 | *adapter) |
| 3425 | { |
| 3426 | struct device *dev = &adapter->vdev->dev; |
| 3427 | struct ibmvnic_sub_crq_queue *scrq; |
| 3428 | int rc; |
| 3429 | |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 3430 | scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3431 | if (!scrq) |
| 3432 | return NULL; |
| 3433 | |
Nathan Fontenot | 7f7adc5 | 2017-04-19 13:45:16 -0400 | [diff] [blame] | 3434 | scrq->msgs = |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 3435 | (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3436 | if (!scrq->msgs) { |
| 3437 | dev_warn(dev, "Couldn't allocate crq queue messages page\n"); |
| 3438 | goto zero_page_failed; |
| 3439 | } |
| 3440 | |
| 3441 | scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, |
| 3442 | DMA_BIDIRECTIONAL); |
| 3443 | if (dma_mapping_error(dev, scrq->msg_token)) { |
| 3444 | dev_warn(dev, "Couldn't map crq queue messages page\n"); |
| 3445 | goto map_failed; |
| 3446 | } |
| 3447 | |
| 3448 | rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, |
| 3449 | 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); |
| 3450 | |
| 3451 | if (rc == H_RESOURCE) |
| 3452 | rc = ibmvnic_reset_crq(adapter); |
| 3453 | |
| 3454 | if (rc == H_CLOSED) { |
| 3455 | dev_warn(dev, "Partner adapter not ready, waiting.\n"); |
| 3456 | } else if (rc) { |
| 3457 | dev_warn(dev, "Error %d registering sub-crq\n", rc); |
| 3458 | goto reg_failed; |
| 3459 | } |
| 3460 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3461 | scrq->adapter = adapter; |
| 3462 | scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); |
Thomas Falcon | f019fb6 | 2020-11-18 19:12:17 -0600 | [diff] [blame] | 3463 | scrq->ind_buf.index = 0; |
| 3464 | |
| 3465 | scrq->ind_buf.indir_arr = |
| 3466 | dma_alloc_coherent(dev, |
| 3467 | IBMVNIC_IND_ARR_SZ, |
| 3468 | &scrq->ind_buf.indir_dma, |
| 3469 | GFP_KERNEL); |
| 3470 | |
| 3471 | if (!scrq->ind_buf.indir_arr) |
| 3472 | goto indir_failed; |
| 3473 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3474 | spin_lock_init(&scrq->lock); |
| 3475 | |
| 3476 | netdev_dbg(adapter->netdev, |
| 3477 | "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", |
| 3478 | scrq->crq_num, scrq->hw_irq, scrq->irq); |
| 3479 | |
| 3480 | return scrq; |
| 3481 | |
Thomas Falcon | f019fb6 | 2020-11-18 19:12:17 -0600 | [diff] [blame] | 3482 | indir_failed: |
| 3483 | do { |
| 3484 | rc = plpar_hcall_norets(H_FREE_SUB_CRQ, |
| 3485 | adapter->vdev->unit_address, |
| 3486 | scrq->crq_num); |
| 3487 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3488 | reg_failed: |
| 3489 | dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, |
| 3490 | DMA_BIDIRECTIONAL); |
| 3491 | map_failed: |
| 3492 | free_pages((unsigned long)scrq->msgs, 2); |
| 3493 | zero_page_failed: |
| 3494 | kfree(scrq); |
| 3495 | |
| 3496 | return NULL; |
| 3497 | } |
| 3498 | |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3499 | static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3500 | { |
| 3501 | int i; |
| 3502 | |
| 3503 | if (adapter->tx_scrq) { |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 3504 | for (i = 0; i < adapter->num_active_tx_scrqs; i++) { |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3505 | if (!adapter->tx_scrq[i]) |
| 3506 | continue; |
| 3507 | |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 3508 | netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", |
| 3509 | i); |
Sukadev Bhattiprolu | 65d6470 | 2021-06-23 21:13:12 -0700 | [diff] [blame] | 3510 | ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3511 | if (adapter->tx_scrq[i]->irq) { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3512 | free_irq(adapter->tx_scrq[i]->irq, |
| 3513 | adapter->tx_scrq[i]); |
Thomas Falcon | 88eb98a | 2016-07-06 15:35:16 -0500 | [diff] [blame] | 3514 | irq_dispose_mapping(adapter->tx_scrq[i]->irq); |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3515 | adapter->tx_scrq[i]->irq = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3516 | } |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3517 | |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3518 | release_sub_crq_queue(adapter, adapter->tx_scrq[i], |
| 3519 | do_h_free); |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3520 | } |
| 3521 | |
Nathan Fontenot | 9501df3 | 2017-03-15 23:38:07 -0400 | [diff] [blame] | 3522 | kfree(adapter->tx_scrq); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3523 | adapter->tx_scrq = NULL; |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 3524 | adapter->num_active_tx_scrqs = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3525 | } |
| 3526 | |
| 3527 | if (adapter->rx_scrq) { |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 3528 | for (i = 0; i < adapter->num_active_rx_scrqs; i++) { |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3529 | if (!adapter->rx_scrq[i]) |
| 3530 | continue; |
| 3531 | |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 3532 | netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", |
| 3533 | i); |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3534 | if (adapter->rx_scrq[i]->irq) { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3535 | free_irq(adapter->rx_scrq[i]->irq, |
| 3536 | adapter->rx_scrq[i]); |
Thomas Falcon | 88eb98a | 2016-07-06 15:35:16 -0500 | [diff] [blame] | 3537 | irq_dispose_mapping(adapter->rx_scrq[i]->irq); |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3538 | adapter->rx_scrq[i]->irq = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3539 | } |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3540 | |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3541 | release_sub_crq_queue(adapter, adapter->rx_scrq[i], |
| 3542 | do_h_free); |
Nathan Fontenot | b510888 | 2017-03-30 02:49:18 -0400 | [diff] [blame] | 3543 | } |
| 3544 | |
Nathan Fontenot | 9501df3 | 2017-03-15 23:38:07 -0400 | [diff] [blame] | 3545 | kfree(adapter->rx_scrq); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3546 | adapter->rx_scrq = NULL; |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 3547 | adapter->num_active_rx_scrqs = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3548 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3549 | } |
| 3550 | |
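/* Sub-CRQ interrupts are masked and unmasked via H_VIOCTL with
 * H_DISABLE_VIO_INTERRUPT/H_ENABLE_VIO_INTERRUPT. During a mobility reset,
 * enable_scrq_irq() first issues an H_EOI for the queue's hw_irq; an
 * H_FUNCTION return from H_EOI is tolerated because it is expected when
 * running in XIVE mode.
 */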
| 3551 | static int disable_scrq_irq(struct ibmvnic_adapter *adapter, |
| 3552 | struct ibmvnic_sub_crq_queue *scrq) |
| 3553 | { |
| 3554 | struct device *dev = &adapter->vdev->dev; |
| 3555 | unsigned long rc; |
| 3556 | |
| 3557 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, |
| 3558 | H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); |
| 3559 | if (rc) |
| 3560 | dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", |
| 3561 | scrq->hw_irq, rc); |
| 3562 | return rc; |
| 3563 | } |
| 3564 | |
| 3565 | static int enable_scrq_irq(struct ibmvnic_adapter *adapter, |
| 3566 | struct ibmvnic_sub_crq_queue *scrq) |
| 3567 | { |
| 3568 | struct device *dev = &adapter->vdev->dev; |
| 3569 | unsigned long rc; |
| 3570 | |
| 3571 | if (scrq->hw_irq > 0x100000000ULL) { |
| 3572 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); |
| 3573 | return 1; |
| 3574 | } |
| 3575 | |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 3576 | if (test_bit(0, &adapter->resetting) && |
Nathan Fontenot | 73f9d36 | 2018-05-22 11:21:10 -0500 | [diff] [blame] | 3577 | adapter->reset_reason == VNIC_RESET_MOBILITY) { |
Juliet Kim | 284f87d | 2019-11-20 10:50:03 -0500 | [diff] [blame] | 3578 | u64 val = (0xff000000) | scrq->hw_irq; |
Nathan Fontenot | 73f9d36 | 2018-05-22 11:21:10 -0500 | [diff] [blame] | 3579 | |
Juliet Kim | 284f87d | 2019-11-20 10:50:03 -0500 | [diff] [blame] | 3580 | rc = plpar_hcall_norets(H_EOI, val); |
Juliet Kim | 2df5c60 | 2019-11-20 10:50:04 -0500 | [diff] [blame] | 3581 | /* H_EOI would fail with rc = H_FUNCTION when running |
| 3582 | * in XIVE mode which is expected, but not an error. |
| 3583 | */ |
Sukadev Bhattiprolu | 154b3b2 | 2021-06-23 21:13:16 -0700 | [diff] [blame] | 3584 | if (rc && (rc != H_FUNCTION)) |
Juliet Kim | 284f87d | 2019-11-20 10:50:03 -0500 | [diff] [blame] | 3585 | dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", |
| 3586 | val, rc); |
Nathan Fontenot | 73f9d36 | 2018-05-22 11:21:10 -0500 | [diff] [blame] | 3587 | } |
Thomas Falcon | f23e064 | 2018-04-15 18:53:36 -0500 | [diff] [blame] | 3588 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3589 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, |
| 3590 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); |
| 3591 | if (rc) |
| 3592 | dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", |
| 3593 | scrq->hw_irq, rc); |
| 3594 | return rc; |
| 3595 | } |
| 3596 | |
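/* TX completion handling: walk the completion descriptors, consume or free
 * each completed skb, return the buffer index to the owning tx/tso pool's
 * free map, report completed work to BQL via netdev_tx_completed_queue()
 * and wake the subqueue once the in-flight count drops to half of the
 * requested ring size or below (if the queue had been stopped).
 */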
| 3597 | static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, |
| 3598 | struct ibmvnic_sub_crq_queue *scrq) |
| 3599 | { |
| 3600 | struct device *dev = &adapter->vdev->dev; |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 3601 | struct ibmvnic_tx_pool *tx_pool; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3602 | struct ibmvnic_tx_buff *txbuff; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 3603 | struct netdev_queue *txq; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3604 | union sub_crq *next; |
| 3605 | int index; |
Thomas Falcon | c62aa37 | 2020-11-18 19:12:20 -0600 | [diff] [blame] | 3606 | int i; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3607 | |
| 3608 | restart_loop: |
| 3609 | while (pending_scrq(adapter, scrq)) { |
| 3610 | unsigned int pool = scrq->pool_index; |
Thomas Falcon | ffc385b | 2018-02-18 10:08:41 -0600 | [diff] [blame] | 3611 | int num_entries = 0; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 3612 | int total_bytes = 0; |
| 3613 | int num_packets = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3614 | |
| 3615 | next = ibmvnic_next_scrq(adapter, scrq); |
| 3616 | for (i = 0; i < next->tx_comp.num_comps; i++) { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3617 | index = be32_to_cpu(next->tx_comp.correlators[i]); |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 3618 | if (index & IBMVNIC_TSO_POOL_MASK) { |
| 3619 | tx_pool = &adapter->tso_pool[pool]; |
| 3620 | index &= ~IBMVNIC_TSO_POOL_MASK; |
| 3621 | } else { |
| 3622 | tx_pool = &adapter->tx_pool[pool]; |
| 3623 | } |
| 3624 | |
| 3625 | txbuff = &tx_pool->tx_buff[index]; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 3626 | num_packets++; |
Thomas Falcon | ffc385b | 2018-02-18 10:08:41 -0600 | [diff] [blame] | 3627 | num_entries += txbuff->num_entries; |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 3628 | if (txbuff->skb) { |
| 3629 | total_bytes += txbuff->skb->len; |
Lijun Pan | ca09bf7 | 2021-04-13 03:33:25 -0500 | [diff] [blame] | 3630 | if (next->tx_comp.rcs[i]) { |
| 3631 | dev_err(dev, "tx error %x\n", |
| 3632 | next->tx_comp.rcs[i]); |
| 3633 | dev_kfree_skb_irq(txbuff->skb); |
| 3634 | } else { |
| 3635 | dev_consume_skb_irq(txbuff->skb); |
| 3636 | } |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 3637 | txbuff->skb = NULL; |
| 3638 | } else { |
| 3639 | netdev_warn(adapter->netdev, |
| 3640 | "TX completion received with NULL socket buffer\n"); |
| 3641 | } |
Thomas Falcon | 06b3e35 | 2018-03-16 20:00:28 -0500 | [diff] [blame] | 3642 | tx_pool->free_map[tx_pool->producer_index] = index; |
| 3643 | tx_pool->producer_index = |
| 3644 | (tx_pool->producer_index + 1) % |
| 3645 | tx_pool->num_buffers; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3646 | } |
| 3647 | /* remove tx_comp scrq */ |
| 3648 | next->tx_comp.first = 0; |
Nathan Fontenot | 7c3e7de | 2017-05-03 14:05:25 -0400 | [diff] [blame] | 3649 | |
Thomas Falcon | 0d97338 | 2020-11-18 19:12:19 -0600 | [diff] [blame] | 3650 | txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); |
| 3651 | netdev_tx_completed_queue(txq, num_packets, total_bytes); |
| 3652 | |
Thomas Falcon | ffc385b | 2018-02-18 10:08:41 -0600 | [diff] [blame] | 3653 | if (atomic_sub_return(num_entries, &scrq->used) <= |
Nathan Fontenot | 7c3e7de | 2017-05-03 14:05:25 -0400 | [diff] [blame] | 3654 | (adapter->req_tx_entries_per_subcrq / 2) && |
| 3655 | __netif_subqueue_stopped(adapter->netdev, |
| 3656 | scrq->pool_index)) { |
| 3657 | netif_wake_subqueue(adapter->netdev, scrq->pool_index); |
Thomas Falcon | 0aecb13 | 2018-02-26 18:10:58 -0600 | [diff] [blame] | 3658 | netdev_dbg(adapter->netdev, "Started queue %d\n", |
| 3659 | scrq->pool_index); |
Nathan Fontenot | 7c3e7de | 2017-05-03 14:05:25 -0400 | [diff] [blame] | 3660 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3661 | } |
| 3662 | |
| 3663 | enable_scrq_irq(adapter, scrq); |
| 3664 | |
| 3665 | if (pending_scrq(adapter, scrq)) { |
| 3666 | disable_scrq_irq(adapter, scrq); |
| 3667 | goto restart_loop; |
| 3668 | } |
| 3669 | |
| 3670 | return 0; |
| 3671 | } |
| 3672 | |
| 3673 | static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance) |
| 3674 | { |
| 3675 | struct ibmvnic_sub_crq_queue *scrq = instance; |
| 3676 | struct ibmvnic_adapter *adapter = scrq->adapter; |
| 3677 | |
| 3678 | disable_scrq_irq(adapter, scrq); |
| 3679 | ibmvnic_complete_tx(adapter, scrq); |
| 3680 | |
| 3681 | return IRQ_HANDLED; |
| 3682 | } |
| 3683 | |
| 3684 | static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) |
| 3685 | { |
| 3686 | struct ibmvnic_sub_crq_queue *scrq = instance; |
| 3687 | struct ibmvnic_adapter *adapter = scrq->adapter; |
| 3688 | |
Nathan Fontenot | 09fb35e | 2018-01-10 10:40:09 -0600 | [diff] [blame] | 3689 | /* When booting a kdump kernel we can hit pending interrupts |
| 3690 | * prior to completing driver initialization. |
| 3691 | */ |
| 3692 | if (unlikely(adapter->state != VNIC_OPEN)) |
| 3693 | return IRQ_NONE; |
| 3694 | |
John Allen | 3d52b59 | 2017-08-02 16:44:14 -0500 | [diff] [blame] | 3695 | adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; |
| 3696 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3697 | if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { |
| 3698 | disable_scrq_irq(adapter, scrq); |
| 3699 | __napi_schedule(&adapter->napi[scrq->scrq_num]); |
| 3700 | } |
| 3701 | |
| 3702 | return IRQ_HANDLED; |
| 3703 | } |
| 3704 | |
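/* Map each sub-CRQ's hypervisor hw_irq to a Linux interrupt and request a
 * handler named "ibmvnic-<unit>-tx<i>" or "ibmvnic-<unit>-rx<i>". On
 * failure, the interrupts already requested are freed and all sub-CRQs are
 * released.
 */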
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3705 | static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) |
| 3706 | { |
| 3707 | struct device *dev = &adapter->vdev->dev; |
| 3708 | struct ibmvnic_sub_crq_queue *scrq; |
| 3709 | int i = 0, j = 0; |
| 3710 | int rc = 0; |
| 3711 | |
| 3712 | for (i = 0; i < adapter->req_tx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 3713 | netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", |
| 3714 | i); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3715 | scrq = adapter->tx_scrq[i]; |
| 3716 | scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); |
| 3717 | |
Michael Ellerman | 99c1790 | 2016-09-10 19:59:05 +1000 | [diff] [blame] | 3718 | if (!scrq->irq) { |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3719 | rc = -EINVAL; |
| 3720 | dev_err(dev, "Error mapping irq\n"); |
| 3721 | goto req_tx_irq_failed; |
| 3722 | } |
| 3723 | |
Murilo Fossa Vicentini | e56e251 | 2019-04-25 11:02:33 -0300 | [diff] [blame] | 3724 | snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", |
| 3725 | adapter->vdev->unit_address, i); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3726 | rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, |
Murilo Fossa Vicentini | e56e251 | 2019-04-25 11:02:33 -0300 | [diff] [blame] | 3727 | 0, scrq->name, scrq); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3728 | |
| 3729 | if (rc) { |
| 3730 | dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", |
| 3731 | scrq->irq, rc); |
| 3732 | irq_dispose_mapping(scrq->irq); |
Nathan Fontenot | af9090c | 2018-02-20 11:04:18 -0600 | [diff] [blame] | 3733 | goto req_tx_irq_failed; |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3734 | } |
| 3735 | } |
| 3736 | |
| 3737 | for (i = 0; i < adapter->req_rx_queues; i++) { |
Nathan Fontenot | d1cf33d | 2017-08-08 15:24:05 -0500 | [diff] [blame] | 3738 | netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", |
| 3739 | i); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3740 | scrq = adapter->rx_scrq[i]; |
| 3741 | scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); |
Michael Ellerman | 99c1790 | 2016-09-10 19:59:05 +1000 | [diff] [blame] | 3742 | if (!scrq->irq) { |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3743 | rc = -EINVAL; |
| 3744 | dev_err(dev, "Error mapping irq\n"); |
| 3745 | goto req_rx_irq_failed; |
| 3746 | } |
Murilo Fossa Vicentini | e56e251 | 2019-04-25 11:02:33 -0300 | [diff] [blame] | 3747 | snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", |
| 3748 | adapter->vdev->unit_address, i); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3749 | rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, |
Murilo Fossa Vicentini | e56e251 | 2019-04-25 11:02:33 -0300 | [diff] [blame] | 3750 | 0, scrq->name, scrq); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3751 | if (rc) { |
| 3752 | dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n", |
| 3753 | scrq->irq, rc); |
| 3754 | irq_dispose_mapping(scrq->irq); |
| 3755 | goto req_rx_irq_failed; |
| 3756 | } |
| 3757 | } |
| 3758 | return rc; |
| 3759 | |
| 3760 | req_rx_irq_failed: |
Thomas Falcon | 8bf371e | 2016-10-27 12:28:52 -0500 | [diff] [blame] | 3761 | for (j = 0; j < i; j++) { |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3762 | free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); |
| 3763 | irq_dispose_mapping(adapter->rx_scrq[j]->irq); |
Thomas Falcon | 8bf371e | 2016-10-27 12:28:52 -0500 | [diff] [blame] | 3764 | } |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3765 | i = adapter->req_tx_queues; |
| 3766 | req_tx_irq_failed: |
Thomas Falcon | 8bf371e | 2016-10-27 12:28:52 -0500 | [diff] [blame] | 3767 | for (j = 0; j < i; j++) { |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3768 | free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); |
Thomas Falcon | 27a2145 | 2020-07-29 16:36:32 -0500 | [diff] [blame] | 3769 | irq_dispose_mapping(adapter->tx_scrq[j]->irq); |
Thomas Falcon | 8bf371e | 2016-10-27 12:28:52 -0500 | [diff] [blame] | 3770 | } |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3771 | release_sub_crqs(adapter, 1); |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3772 | return rc; |
| 3773 | } |
| 3774 | |
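/* Allocate the TX and RX sub-CRQs in a single pass. If fewer queues than
 * requested could be registered (but at least the required minimums), the
 * shortfall is absorbed by trimming req_tx_queues/req_rx_queues before the
 * queues are assigned to their arrays.
 */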
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3775 | static int init_sub_crqs(struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3776 | { |
| 3777 | struct device *dev = &adapter->vdev->dev; |
| 3778 | struct ibmvnic_sub_crq_queue **allqueues; |
| 3779 | int registered_queues = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3780 | int total_queues; |
| 3781 | int more = 0; |
Thomas Falcon | ea22d51 | 2016-07-06 15:35:17 -0500 | [diff] [blame] | 3782 | int i; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3783 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3784 | total_queues = adapter->req_tx_queues + adapter->req_rx_queues; |
| 3785 | |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 3786 | allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3787 | if (!allqueues) |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 3788 | return -ENOMEM; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3789 | |
| 3790 | for (i = 0; i < total_queues; i++) { |
| 3791 | allqueues[i] = init_sub_crq_queue(adapter); |
| 3792 | if (!allqueues[i]) { |
| 3793 | dev_warn(dev, "Couldn't allocate all sub-crqs\n"); |
| 3794 | break; |
| 3795 | } |
| 3796 | registered_queues++; |
| 3797 | } |
| 3798 | |
| 3799 | /* Make sure we were able to register the minimum number of queues */ |
| 3800 | if (registered_queues < |
| 3801 | adapter->min_tx_queues + adapter->min_rx_queues) { |
| 3802 | dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); |
| 3803 | goto tx_failed; |
| 3804 | } |
| 3805 | |
| 3806 | /* Reduce the requested queue counts to absorb the allocation shortfall */ |
| 3807 | for (i = 0; i < total_queues - registered_queues + more ; i++) { |
| 3808 | netdev_dbg(adapter->netdev, "Reducing number of queues\n"); |
| 3809 | switch (i % 3) { |
| 3810 | case 0: |
| 3811 | if (adapter->req_rx_queues > adapter->min_rx_queues) |
| 3812 | adapter->req_rx_queues--; |
| 3813 | else |
| 3814 | more++; |
| 3815 | break; |
| 3816 | case 1: |
| 3817 | if (adapter->req_tx_queues > adapter->min_tx_queues) |
| 3818 | adapter->req_tx_queues--; |
| 3819 | else |
| 3820 | more++; |
| 3821 | break; |
| 3822 | } |
| 3823 | } |
| 3824 | |
| 3825 | adapter->tx_scrq = kcalloc(adapter->req_tx_queues, |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 3826 | sizeof(*adapter->tx_scrq), GFP_KERNEL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3827 | if (!adapter->tx_scrq) |
| 3828 | goto tx_failed; |
| 3829 | |
| 3830 | for (i = 0; i < adapter->req_tx_queues; i++) { |
| 3831 | adapter->tx_scrq[i] = allqueues[i]; |
| 3832 | adapter->tx_scrq[i]->pool_index = i; |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 3833 | adapter->num_active_tx_scrqs++; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3834 | } |
| 3835 | |
| 3836 | adapter->rx_scrq = kcalloc(adapter->req_rx_queues, |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 3837 | sizeof(*adapter->rx_scrq), GFP_KERNEL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3838 | if (!adapter->rx_scrq) |
| 3839 | goto rx_failed; |
| 3840 | |
| 3841 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 3842 | adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; |
| 3843 | adapter->rx_scrq[i]->scrq_num = i; |
Nathan Fontenot | 82e3be3 | 2018-02-21 21:33:56 -0600 | [diff] [blame] | 3844 | adapter->num_active_rx_scrqs++; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3845 | } |
| 3846 | |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3847 | kfree(allqueues); |
| 3848 | return 0; |
| 3849 | |
| 3850 | rx_failed: |
| 3851 | kfree(adapter->tx_scrq); |
| 3852 | adapter->tx_scrq = NULL; |
| 3853 | tx_failed: |
| 3854 | for (i = 0; i < registered_queues; i++) |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 3855 | release_sub_crq_queue(adapter, allqueues[i], 1); |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3856 | kfree(allqueues); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 3857 | return -ENOMEM; |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3858 | } |
| 3859 | |
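/* Build and send the REQUEST_CAPABILITY CRQs. On the first attempt the
 * requested values are derived from the desired/maximum settings, with ring
 * sizes clamped so that a single long-term buffer stays within
 * IBMVNIC_MAX_LTB_SIZE; on a retry the previously computed values are kept
 * and only the outstanding-response counter is re-armed.
 */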
Lijun Pan | 09081b9 | 2020-09-27 20:13:27 -0500 | [diff] [blame] | 3860 | static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3861 | { |
| 3862 | struct device *dev = &adapter->vdev->dev; |
| 3863 | union ibmvnic_crq crq; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3864 | int max_entries; |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3865 | int cap_reqs; |
| 3866 | |
| 3867 | /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on |
| 3868 | * the PROMISC flag). Initialize this count upfront. When the tasklet |
| 3869 | * receives a response to all of these, it will send the next protocol |
| 3870 | * message (QUERY_IP_OFFLOAD). |
| 3871 | */ |
| 3872 | if (!(adapter->netdev->flags & IFF_PROMISC) || |
| 3873 | adapter->promisc_supported) |
| 3874 | cap_reqs = 7; |
| 3875 | else |
| 3876 | cap_reqs = 6; |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3877 | |
| 3878 | if (!retry) { |
| 3879 | /* Sub-CRQ entries are 32 bytes long */ |
| 3880 | int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); |
| 3881 | |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3882 | atomic_set(&adapter->running_cap_crqs, cap_reqs); |
| 3883 | |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3884 | if (adapter->min_tx_entries_per_subcrq > entries_page || |
| 3885 | adapter->min_rx_add_entries_per_subcrq > entries_page) { |
| 3886 | dev_err(dev, "Fatal, invalid entries per sub-crq\n"); |
| 3887 | return; |
| 3888 | } |
| 3889 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3890 | if (adapter->desired.mtu) |
| 3891 | adapter->req_mtu = adapter->desired.mtu; |
| 3892 | else |
| 3893 | adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3894 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 3895 | if (!adapter->desired.tx_entries) |
| 3896 | adapter->desired.tx_entries = |
| 3897 | adapter->max_tx_entries_per_subcrq; |
| 3898 | if (!adapter->desired.rx_entries) |
| 3899 | adapter->desired.rx_entries = |
| 3900 | adapter->max_rx_add_entries_per_subcrq; |
| 3901 | |
| 3902 | max_entries = IBMVNIC_MAX_LTB_SIZE / |
| 3903 | (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); |
| 3904 | |
| 3905 | if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * |
| 3906 | adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { |
| 3907 | adapter->desired.tx_entries = max_entries; |
| 3908 | } |
| 3909 | |
| 3910 | if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * |
| 3911 | adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { |
| 3912 | adapter->desired.rx_entries = max_entries; |
| 3913 | } |
| 3914 | |
| 3915 | if (adapter->desired.tx_entries) |
| 3916 | adapter->req_tx_entries_per_subcrq = |
| 3917 | adapter->desired.tx_entries; |
| 3918 | else |
| 3919 | adapter->req_tx_entries_per_subcrq = |
| 3920 | adapter->max_tx_entries_per_subcrq; |
| 3921 | |
| 3922 | if (adapter->desired.rx_entries) |
| 3923 | adapter->req_rx_add_entries_per_subcrq = |
| 3924 | adapter->desired.rx_entries; |
| 3925 | else |
| 3926 | adapter->req_rx_add_entries_per_subcrq = |
| 3927 | adapter->max_rx_add_entries_per_subcrq; |
| 3928 | |
| 3929 | if (adapter->desired.tx_queues) |
| 3930 | adapter->req_tx_queues = |
| 3931 | adapter->desired.tx_queues; |
| 3932 | else |
| 3933 | adapter->req_tx_queues = |
| 3934 | adapter->opt_tx_comp_sub_queues; |
| 3935 | |
| 3936 | if (adapter->desired.rx_queues) |
| 3937 | adapter->req_rx_queues = |
| 3938 | adapter->desired.rx_queues; |
| 3939 | else |
| 3940 | adapter->req_rx_queues = |
| 3941 | adapter->opt_rx_comp_queues; |
| 3942 | |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3943 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3944 | } else { |
| 3945 | atomic_add(cap_reqs, &adapter->running_cap_crqs); |
Nathan Fontenot | d346b9b | 2017-04-25 15:01:04 -0400 | [diff] [blame] | 3946 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3947 | memset(&crq, 0, sizeof(crq)); |
| 3948 | crq.request_capability.first = IBMVNIC_CRQ_CMD; |
| 3949 | crq.request_capability.cmd = REQUEST_CAPABILITY; |
| 3950 | |
| 3951 | crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3952 | crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3953 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3954 | ibmvnic_send_crq(adapter, &crq); |
| 3955 | |
| 3956 | crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3957 | crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3958 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3959 | ibmvnic_send_crq(adapter, &crq); |
| 3960 | |
| 3961 | crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3962 | crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3963 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3964 | ibmvnic_send_crq(adapter, &crq); |
| 3965 | |
| 3966 | crq.request_capability.capability = |
| 3967 | cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); |
| 3968 | crq.request_capability.number = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3969 | cpu_to_be64(adapter->req_tx_entries_per_subcrq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3970 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3971 | ibmvnic_send_crq(adapter, &crq); |
| 3972 | |
| 3973 | crq.request_capability.capability = |
| 3974 | cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); |
| 3975 | crq.request_capability.number = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3976 | cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3977 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3978 | ibmvnic_send_crq(adapter, &crq); |
| 3979 | |
| 3980 | crq.request_capability.capability = cpu_to_be16(REQ_MTU); |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3981 | crq.request_capability.number = cpu_to_be64(adapter->req_mtu); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3982 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3983 | ibmvnic_send_crq(adapter, &crq); |
| 3984 | |
| 3985 | if (adapter->netdev->flags & IFF_PROMISC) { |
| 3986 | if (adapter->promisc_supported) { |
| 3987 | crq.request_capability.capability = |
| 3988 | cpu_to_be16(PROMISC_REQUESTED); |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3989 | crq.request_capability.number = cpu_to_be64(1); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3990 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3991 | ibmvnic_send_crq(adapter, &crq); |
| 3992 | } |
| 3993 | } else { |
| 3994 | crq.request_capability.capability = |
| 3995 | cpu_to_be16(PROMISC_REQUESTED); |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 3996 | crq.request_capability.number = cpu_to_be64(0); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 3997 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 3998 | ibmvnic_send_crq(adapter, &crq); |
| 3999 | } |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4000 | |
| 4001 | /* Keep at end to catch any discrepancy between expected and actual |
| 4002 | * CRQs sent. |
| 4003 | */ |
| 4004 | WARN_ON(cap_reqs != 0); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4005 | } |
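/* Editor's sketch of the request/response accounting used by
 * send_request_cap() above. This is a pattern illustration only: the
 * example_-prefixed function names are hypothetical, the remaining
 * symbols are the driver's own. The sender primes an atomic counter with
 * the number of CRQs it is about to issue, the response handler
 * decrements it, and the next protocol step fires only once every
 * outstanding response has arrived.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_send_cap_batch(struct ibmvnic_adapter *adapter, int n)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	/* ...fill in one capability per iteration... */
	atomic_set(&adapter->running_cap_crqs, n);
	while (n--)
		ibmvnic_send_crq(adapter, &crq);
}

static void example_handle_cap_rsp(struct ibmvnic_adapter *adapter)
{
	/* Tasklet context: the last response triggers the next step. */
	if (atomic_dec_return(&adapter->running_cap_crqs) == 0)
		send_query_ip_offload(adapter);
}
#endif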
| 4006 | |
| 4007 | static int pending_scrq(struct ibmvnic_adapter *adapter, |
| 4008 | struct ibmvnic_sub_crq_queue *scrq) |
| 4009 | { |
| 4010 | union sub_crq *entry = &scrq->msgs[scrq->cur]; |
Lijun Pan | 665ab1e | 2021-01-29 19:19:04 -0600 | [diff] [blame] | 4011 | int rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4012 | |
Lijun Pan | 665ab1e | 2021-01-29 19:19:04 -0600 | [diff] [blame] | 4013 | rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); |
| 4014 | |
| 4015 | /* Ensure that the SCRQ valid flag is loaded prior to loading the |
| 4016 | * contents of the SCRQ descriptor |
| 4017 | */ |
| 4018 | dma_rmb(); |
| 4019 | |
| 4020 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4021 | } |
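/* Editor's sketch of the barrier pairing that pending_scrq() relies on
 * (assumed pattern, not literal driver code): the producer publishes the
 * descriptor payload before setting the valid flag, ordered by a write
 * barrier, so the consumer must order its reads the other way with
 * dma_rmb() once the flag is observed.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_consume(union sub_crq *desc)
{
	if (desc->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		/* Pairs with the producer's write barrier: do not read
		 * the payload speculatively before the valid flag.
		 */
		dma_rmb();
		/* ...now safe to read the rest of *desc... */
	}
}
#endif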
| 4022 | |
| 4023 | static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, |
| 4024 | struct ibmvnic_sub_crq_queue *scrq) |
| 4025 | { |
| 4026 | union sub_crq *entry; |
| 4027 | unsigned long flags; |
| 4028 | |
| 4029 | spin_lock_irqsave(&scrq->lock, flags); |
| 4030 | entry = &scrq->msgs[scrq->cur]; |
| 4031 | if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { |
| 4032 | if (++scrq->cur == scrq->size) |
| 4033 | scrq->cur = 0; |
| 4034 | } else { |
| 4035 | entry = NULL; |
| 4036 | } |
| 4037 | spin_unlock_irqrestore(&scrq->lock, flags); |
| 4038 | |
Lijun Pan | 665ab1e | 2021-01-29 19:19:04 -0600 | [diff] [blame] | 4039 | /* Ensure that the SCRQ valid flag is loaded prior to loading the |
| 4040 | * contents of the SCRQ descriptor |
Thomas Falcon | b71ec95 | 2020-12-01 09:52:10 -0600 | [diff] [blame] | 4041 | */ |
| 4042 | dma_rmb(); |
| 4043 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4044 | return entry; |
| 4045 | } |
| 4046 | |
| 4047 | static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) |
| 4048 | { |
| 4049 | struct ibmvnic_crq_queue *queue = &adapter->crq; |
| 4050 | union ibmvnic_crq *crq; |
| 4051 | |
| 4052 | crq = &queue->msgs[queue->cur]; |
| 4053 | if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { |
| 4054 | if (++queue->cur == queue->size) |
| 4055 | queue->cur = 0; |
| 4056 | } else { |
| 4057 | crq = NULL; |
| 4058 | } |
| 4059 | |
| 4060 | return crq; |
| 4061 | } |
| 4062 | |
Thomas Falcon | 2d14d37 | 2018-07-13 12:03:32 -0500 | [diff] [blame] | 4063 | static void print_subcrq_error(struct device *dev, int rc, const char *func) |
| 4064 | { |
| 4065 | switch (rc) { |
| 4066 | case H_PARAMETER: |
| 4067 | dev_warn_ratelimited(dev, |
| 4068 | "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", |
| 4069 | func, rc); |
| 4070 | break; |
| 4071 | case H_CLOSED: |
| 4072 | dev_warn_ratelimited(dev, |
| 4073 | "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n", |
| 4074 | func, rc); |
| 4075 | break; |
| 4076 | default: |
| 4077 | dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc); |
| 4078 | break; |
| 4079 | } |
| 4080 | } |
| 4081 | |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 4082 | static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, |
| 4083 | u64 remote_handle, u64 ioba, u64 num_entries) |
| 4084 | { |
| 4085 | unsigned int ua = adapter->vdev->unit_address; |
| 4086 | struct device *dev = &adapter->vdev->dev; |
| 4087 | int rc; |
| 4088 | |
| 4089 | /* Make sure the hypervisor sees the complete request */ |
Lijun Pan | 1a42156 | 2021-02-12 20:36:46 -0600 | [diff] [blame] | 4090 | dma_wmb(); |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 4091 | rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, |
| 4092 | cpu_to_be64(remote_handle), |
| 4093 | ioba, num_entries); |
| 4094 | |
Thomas Falcon | 2d14d37 | 2018-07-13 12:03:32 -0500 | [diff] [blame] | 4095 | if (rc) |
| 4096 | print_subcrq_error(dev, rc, __func__); |
Thomas Falcon | ad7775d | 2016-04-01 17:20:34 -0500 | [diff] [blame] | 4097 | |
| 4098 | return rc; |
| 4099 | } |
| 4100 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4101 | static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, |
| 4102 | union ibmvnic_crq *crq) |
| 4103 | { |
| 4104 | unsigned int ua = adapter->vdev->unit_address; |
| 4105 | struct device *dev = &adapter->vdev->dev; |
| 4106 | u64 *u64_crq = (u64 *)crq; |
| 4107 | int rc; |
| 4108 | |
| 4109 | netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", |
Lijun Pan | 429aa36 | 2021-02-11 00:43:18 -0600 | [diff] [blame] | 4110 | (unsigned long)cpu_to_be64(u64_crq[0]), |
| 4111 | (unsigned long)cpu_to_be64(u64_crq[1])); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4112 | |
Thomas Falcon | 5153698 | 2018-05-23 13:37:56 -0500 | [diff] [blame] | 4113 | if (!adapter->crq.active && |
| 4114 | crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { |
| 4115 | dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); |
| 4116 | return -EINVAL; |
| 4117 | } |
| 4118 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4119 | /* Make sure the hypervisor sees the complete request */ |
Lijun Pan | 1a42156 | 2021-02-12 20:36:46 -0600 | [diff] [blame] | 4120 | dma_wmb(); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4121 | |
| 4122 | rc = plpar_hcall_norets(H_SEND_CRQ, ua, |
| 4123 | cpu_to_be64(u64_crq[0]), |
| 4124 | cpu_to_be64(u64_crq[1])); |
| 4125 | |
| 4126 | if (rc) { |
Nathan Fontenot | ec95dff | 2018-02-07 13:00:24 -0600 | [diff] [blame] | 4127 | if (rc == H_CLOSED) { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4128 | dev_warn(dev, "CRQ Queue closed\n"); |
Lijun Pan | fa68bfa | 2020-08-19 17:52:24 -0500 | [diff] [blame] | 4129 | /* do not reset; report the failure and wait for passive init from the server */ |
Nathan Fontenot | ec95dff | 2018-02-07 13:00:24 -0600 | [diff] [blame] | 4130 | } |
| 4131 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4132 | dev_warn(dev, "Send error (rc=%d)\n", rc); |
| 4133 | } |
| 4134 | |
| 4135 | return rc; |
| 4136 | } |
| 4137 | |
| 4138 | static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) |
| 4139 | { |
Thomas Falcon | 36a782f | 2020-08-31 11:59:57 -0500 | [diff] [blame] | 4140 | struct device *dev = &adapter->vdev->dev; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4141 | union ibmvnic_crq crq; |
Thomas Falcon | 36a782f | 2020-08-31 11:59:57 -0500 | [diff] [blame] | 4142 | int retries = 100; |
| 4143 | int rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4144 | |
| 4145 | memset(&crq, 0, sizeof(crq)); |
| 4146 | crq.generic.first = IBMVNIC_CRQ_INIT_CMD; |
| 4147 | crq.generic.cmd = IBMVNIC_CRQ_INIT; |
| 4148 | netdev_dbg(adapter->netdev, "Sending CRQ init\n"); |
| 4149 | |
Thomas Falcon | 36a782f | 2020-08-31 11:59:57 -0500 | [diff] [blame] | 4150 | do { |
| 4151 | rc = ibmvnic_send_crq(adapter, &crq); |
| 4152 | if (rc != H_CLOSED) |
| 4153 | break; |
| 4154 | retries--; |
| 4155 | msleep(50); |
| 4156 | |
| 4157 | } while (retries > 0); |
| 4158 | |
| 4159 | if (rc) { |
| 4160 | dev_err(dev, "Failed to send init request, rc = %d\n", rc); |
| 4161 | return rc; |
| 4162 | } |
| 4163 | |
| 4164 | return 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4165 | } |
| 4166 | |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4167 | struct vnic_login_client_data { |
| 4168 | u8 type; |
| 4169 | __be16 len; |
Kees Cook | 08ea556 | 2018-04-10 15:26:43 -0700 | [diff] [blame] | 4170 | char name[]; |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4171 | } __packed; |
| 4172 | |
| 4173 | static int vnic_client_data_len(struct ibmvnic_adapter *adapter) |
| 4174 | { |
| 4175 | int len; |
| 4176 | |
| 4177 | /* Calculate the amount of buffer space needed for the |
| 4178 | * vnic client data in the login buffer. There are four entries: |
| 4179 | * OS name, LPAR name, device name, and a null last entry. |
| 4180 | */ |
| 4181 | len = 4 * sizeof(struct vnic_login_client_data); |
| 4182 | len += 6; /* "Linux" plus NUL terminator */ |
| 4183 | len += strlen(utsname()->nodename) + 1; |
| 4184 | len += strlen(adapter->netdev->name) + 1; |
| 4185 | |
| 4186 | return len; |
| 4187 | } |
| 4188 | |
| 4189 | static void vnic_add_client_data(struct ibmvnic_adapter *adapter, |
| 4190 | struct vnic_login_client_data *vlcd) |
| 4191 | { |
| 4192 | const char *os_name = "Linux"; |
| 4193 | int len; |
| 4194 | |
| 4195 | /* Type 1 - LPAR OS */ |
| 4196 | vlcd->type = 1; |
| 4197 | len = strlen(os_name) + 1; |
| 4198 | vlcd->len = cpu_to_be16(len); |
Kees Cook | ef2c3dd | 2021-06-21 14:35:09 -0700 | [diff] [blame] | 4199 | strscpy(vlcd->name, os_name, len); |
Kees Cook | 08ea556 | 2018-04-10 15:26:43 -0700 | [diff] [blame] | 4200 | vlcd = (struct vnic_login_client_data *)(vlcd->name + len); |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4201 | |
| 4202 | /* Type 2 - LPAR name */ |
| 4203 | vlcd->type = 2; |
| 4204 | len = strlen(utsname()->nodename) + 1; |
| 4205 | vlcd->len = cpu_to_be16(len); |
Kees Cook | ef2c3dd | 2021-06-21 14:35:09 -0700 | [diff] [blame] | 4206 | strscpy(vlcd->name, utsname()->nodename, len); |
Kees Cook | 08ea556 | 2018-04-10 15:26:43 -0700 | [diff] [blame] | 4207 | vlcd = (struct vnic_login_client_data *)(vlcd->name + len); |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4208 | |
| 4209 | /* Type 3 - device name */ |
| 4210 | vlcd->type = 3; |
| 4211 | len = strlen(adapter->netdev->name) + 1; |
| 4212 | vlcd->len = cpu_to_be16(len); |
Kees Cook | ef2c3dd | 2021-06-21 14:35:09 -0700 | [diff] [blame] | 4213 | strscpy(vlcd->name, adapter->netdev->name, len); |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4214 | } |
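/* Editor's note: the client data written above forms a simple TLV list.
 * With assumed example strings, the resulting layout is:
 *
 *   type=1 len=6        "Linux\0"            (LPAR OS)
 *   type=2 len=var      nodename + '\0'      (LPAR name)
 *   type=3 len=var      netdev name + '\0'   (device name)
 *   type=0 len=0                             (null terminator)
 *
 * Each header is 3 bytes (u8 type + __be16 len, packed) and the next
 * entry starts immediately after the previous entry's name bytes. The
 * terminating zeroed entry is not written explicitly; it comes from the
 * kzalloc()'d slack accounted for in vnic_client_data_len().
 */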
| 4215 | |
Thomas Falcon | 20a8ab7 | 2018-02-26 18:10:59 -0600 | [diff] [blame] | 4216 | static int send_login(struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4217 | { |
| 4218 | struct ibmvnic_login_rsp_buffer *login_rsp_buffer; |
| 4219 | struct ibmvnic_login_buffer *login_buffer; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4220 | struct device *dev = &adapter->vdev->dev; |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 4221 | struct vnic_login_client_data *vlcd; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4222 | dma_addr_t rsp_buffer_token; |
| 4223 | dma_addr_t buffer_token; |
| 4224 | size_t rsp_buffer_size; |
| 4225 | union ibmvnic_crq crq; |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 4226 | int client_data_len; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4227 | size_t buffer_size; |
| 4228 | __be64 *tx_list_p; |
| 4229 | __be64 *rx_list_p; |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 4230 | int rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4231 | int i; |
| 4232 | |
Thomas Falcon | 20a8ab7 | 2018-02-26 18:10:59 -0600 | [diff] [blame] | 4233 | if (!adapter->tx_scrq || !adapter->rx_scrq) { |
| 4234 | netdev_err(adapter->netdev, |
| 4235 | "RX or TX queues are not allocated, device login failed\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 4236 | return -ENOMEM; |
Thomas Falcon | 20a8ab7 | 2018-02-26 18:10:59 -0600 | [diff] [blame] | 4237 | } |
| 4238 | |
Lijun Pan | a0c8be5 | 2020-12-19 15:39:19 -0600 | [diff] [blame] | 4239 | release_login_buffer(adapter); |
Thomas Falcon | 34f0f4e | 2018-02-13 18:23:40 -0600 | [diff] [blame] | 4240 | release_login_rsp_buffer(adapter); |
Lijun Pan | a0c8be5 | 2020-12-19 15:39:19 -0600 | [diff] [blame] | 4241 | |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4242 | client_data_len = vnic_client_data_len(adapter); |
| 4243 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4244 | buffer_size = |
| 4245 | sizeof(struct ibmvnic_login_buffer) + |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4246 | sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + |
| 4247 | client_data_len; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4248 | |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4249 | login_buffer = kzalloc(buffer_size, GFP_ATOMIC); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4250 | if (!login_buffer) |
| 4251 | goto buf_alloc_failed; |
| 4252 | |
| 4253 | buffer_token = dma_map_single(dev, login_buffer, buffer_size, |
| 4254 | DMA_TO_DEVICE); |
| 4255 | if (dma_mapping_error(dev, buffer_token)) { |
| 4256 | dev_err(dev, "Couldn't map login buffer\n"); |
| 4257 | goto buf_map_failed; |
| 4258 | } |
| 4259 | |
John Allen | 498cd8e | 2016-04-06 11:49:55 -0500 | [diff] [blame] | 4260 | rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + |
| 4261 | sizeof(u64) * adapter->req_tx_queues + |
| 4262 | sizeof(u64) * adapter->req_rx_queues + |
| 4263 | sizeof(u64) * adapter->req_rx_queues + |
| 4264 | sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4265 | |
| 4266 | login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); |
| 4267 | if (!login_rsp_buffer) |
| 4268 | goto buf_rsp_alloc_failed; |
| 4269 | |
| 4270 | rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, |
| 4271 | rsp_buffer_size, DMA_FROM_DEVICE); |
| 4272 | if (dma_mapping_error(dev, rsp_buffer_token)) { |
| 4273 | dev_err(dev, "Couldn't map login rsp buffer\n"); |
| 4274 | goto buf_rsp_map_failed; |
| 4275 | } |
Nathan Fontenot | 661a262 | 2017-04-19 13:44:58 -0400 | [diff] [blame] | 4276 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4277 | adapter->login_buf = login_buffer; |
| 4278 | adapter->login_buf_token = buffer_token; |
| 4279 | adapter->login_buf_sz = buffer_size; |
| 4280 | adapter->login_rsp_buf = login_rsp_buffer; |
| 4281 | adapter->login_rsp_buf_token = rsp_buffer_token; |
| 4282 | adapter->login_rsp_buf_sz = rsp_buffer_size; |
| 4283 | |
| 4284 | login_buffer->len = cpu_to_be32(buffer_size); |
| 4285 | login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); |
| 4286 | login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); |
| 4287 | login_buffer->off_txcomp_subcrqs = |
| 4288 | cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); |
| 4289 | login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); |
| 4290 | login_buffer->off_rxcomp_subcrqs = |
| 4291 | cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + |
| 4292 | sizeof(u64) * adapter->req_tx_queues); |
| 4293 | login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); |
| 4294 | login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); |
| 4295 | |
| 4296 | tx_list_p = (__be64 *)((char *)login_buffer + |
| 4297 | sizeof(struct ibmvnic_login_buffer)); |
| 4298 | rx_list_p = (__be64 *)((char *)login_buffer + |
| 4299 | sizeof(struct ibmvnic_login_buffer) + |
| 4300 | sizeof(u64) * adapter->req_tx_queues); |
| 4301 | |
| 4302 | for (i = 0; i < adapter->req_tx_queues; i++) { |
| 4303 | if (adapter->tx_scrq[i]) { |
Lijun Pan | 914789a | 2021-02-11 00:43:21 -0600 | [diff] [blame] | 4304 | tx_list_p[i] = |
| 4305 | cpu_to_be64(adapter->tx_scrq[i]->crq_num); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4306 | } |
| 4307 | } |
| 4308 | |
| 4309 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 4310 | if (adapter->rx_scrq[i]) { |
Lijun Pan | 914789a | 2021-02-11 00:43:21 -0600 | [diff] [blame] | 4311 | rx_list_p[i] = |
| 4312 | cpu_to_be64(adapter->rx_scrq[i]->crq_num); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4313 | } |
| 4314 | } |
| 4315 | |
Nathan Fontenot | 37798d0 | 2017-11-08 11:23:56 -0600 | [diff] [blame] | 4316 | /* Insert vNIC login client data */ |
| 4317 | vlcd = (struct vnic_login_client_data *) |
| 4318 | ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); |
| 4319 | login_buffer->client_data_offset = |
| 4320 | cpu_to_be32((char *)vlcd - (char *)login_buffer); |
| 4321 | login_buffer->client_data_len = cpu_to_be32(client_data_len); |
| 4322 | |
| 4323 | vnic_add_client_data(adapter, vlcd); |
| 4324 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4325 | netdev_dbg(adapter->netdev, "Login Buffer:\n"); |
| 4326 | for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { |
| 4327 | netdev_dbg(adapter->netdev, "%016lx\n", |
Lijun Pan | 429aa36 | 2021-02-11 00:43:18 -0600 | [diff] [blame] | 4328 | ((unsigned long *)(adapter->login_buf))[i]); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4329 | } |
| 4330 | |
| 4331 | memset(&crq, 0, sizeof(crq)); |
| 4332 | crq.login.first = IBMVNIC_CRQ_CMD; |
| 4333 | crq.login.cmd = LOGIN; |
| 4334 | crq.login.ioba = cpu_to_be32(buffer_token); |
| 4335 | crq.login.len = cpu_to_be32(buffer_size); |
Sukadev Bhattiprolu | 76cdc5c | 2020-11-25 18:04:29 -0600 | [diff] [blame] | 4336 | |
| 4337 | adapter->login_pending = true; |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 4338 | rc = ibmvnic_send_crq(adapter, &crq); |
| 4339 | if (rc) { |
| 4340 | adapter->login_pending = false; |
| 4341 | netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); |
| 4342 | goto buf_rsp_map_failed; |
| 4343 | } |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4344 | |
Thomas Falcon | 20a8ab7 | 2018-02-26 18:10:59 -0600 | [diff] [blame] | 4345 | return 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4346 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4347 | buf_rsp_map_failed: |
| 4348 | kfree(login_rsp_buffer); |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 4349 | adapter->login_rsp_buf = NULL; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4350 | buf_rsp_alloc_failed: |
| 4351 | dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); |
| 4352 | buf_map_failed: |
| 4353 | kfree(login_buffer); |
Dany Madden | c98d9cc | 2020-11-25 18:04:30 -0600 | [diff] [blame] | 4354 | adapter->login_buf = NULL; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4355 | buf_alloc_failed: |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 4356 | return -ENOMEM; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4357 | } |
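/* Editor's sketch of the login buffer assembled by send_login() above,
 * using the offsets computed there (a layout summary, not a spec):
 *
 *   offset 0               struct ibmvnic_login_buffer (header)
 *   off_txcomp_subcrqs     req_tx_queues x __be64 tx sub-CRQ numbers
 *   off_rxcomp_subcrqs     req_rx_queues x __be64 rx sub-CRQ numbers
 *   client_data_offset     client data TLV list (see above)
 *   len                    == buffer_size
 *
 * The response buffer is sized and DMA-mapped separately and advertised
 * to the server via login_rsp_ioba/login_rsp_len so the server can
 * return the queue handles there.
 */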
| 4358 | |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 4359 | static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, |
| 4360 | u32 len, u8 map_id) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4361 | { |
| 4362 | union ibmvnic_crq crq; |
| 4363 | |
| 4364 | memset(&crq, 0, sizeof(crq)); |
| 4365 | crq.request_map.first = IBMVNIC_CRQ_CMD; |
| 4366 | crq.request_map.cmd = REQUEST_MAP; |
| 4367 | crq.request_map.map_id = map_id; |
| 4368 | crq.request_map.ioba = cpu_to_be32(addr); |
| 4369 | crq.request_map.len = cpu_to_be32(len); |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 4370 | return ibmvnic_send_crq(adapter, &crq); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4371 | } |
| 4372 | |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 4373 | static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4374 | { |
| 4375 | union ibmvnic_crq crq; |
| 4376 | |
| 4377 | memset(&crq, 0, sizeof(crq)); |
| 4378 | crq.request_unmap.first = IBMVNIC_CRQ_CMD; |
| 4379 | crq.request_unmap.cmd = REQUEST_UNMAP; |
| 4380 | crq.request_unmap.map_id = map_id; |
Thomas Falcon | 9c4eaab | 2018-05-23 13:37:57 -0500 | [diff] [blame] | 4381 | return ibmvnic_send_crq(adapter, &crq); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4382 | } |
| 4383 | |
Lijun Pan | 69980d0 | 2020-09-27 20:13:28 -0500 | [diff] [blame] | 4384 | static void send_query_map(struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4385 | { |
| 4386 | union ibmvnic_crq crq; |
| 4387 | |
| 4388 | memset(&crq, 0, sizeof(crq)); |
| 4389 | crq.query_map.first = IBMVNIC_CRQ_CMD; |
| 4390 | crq.query_map.cmd = QUERY_MAP; |
| 4391 | ibmvnic_send_crq(adapter, &crq); |
| 4392 | } |
| 4393 | |
| 4394 | /* Send a series of CRQs requesting various capabilities of the VNIC server */ |
Lijun Pan | 491099a | 2020-09-27 20:13:26 -0500 | [diff] [blame] | 4395 | static void send_query_cap(struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4396 | { |
| 4397 | union ibmvnic_crq crq; |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4398 | int cap_reqs; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4399 | |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4400 | /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count |
| 4401 | * upfront. When the tasklet receives a response to all of these, it |
| 4402 | * can send out the next protocol message (REQUEST_CAPABILITY). |
| 4403 | */ |
| 4404 | cap_reqs = 25; |
| 4405 | |
| 4406 | atomic_set(&adapter->running_cap_crqs, cap_reqs); |
| 4407 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4408 | memset(&crq, 0, sizeof(crq)); |
| 4409 | crq.query_capability.first = IBMVNIC_CRQ_CMD; |
| 4410 | crq.query_capability.cmd = QUERY_CAPABILITY; |
| 4411 | |
| 4412 | crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4413 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4414 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4415 | |
| 4416 | crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4417 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4418 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4419 | |
| 4420 | crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4421 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4422 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4423 | |
| 4424 | crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4425 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4426 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4427 | |
| 4428 | crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4429 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4430 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4431 | |
| 4432 | crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4433 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4434 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4435 | |
| 4436 | crq.query_capability.capability = |
| 4437 | cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4438 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4439 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4440 | |
| 4441 | crq.query_capability.capability = |
| 4442 | cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4443 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4444 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4445 | |
| 4446 | crq.query_capability.capability = |
| 4447 | cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4448 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4449 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4450 | |
| 4451 | crq.query_capability.capability = |
| 4452 | cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4453 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4454 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4455 | |
| 4456 | crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4457 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4458 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4459 | |
| 4460 | crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4461 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4462 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4463 | |
| 4464 | crq.query_capability.capability = cpu_to_be16(MIN_MTU); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4465 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4466 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4467 | |
| 4468 | crq.query_capability.capability = cpu_to_be16(MAX_MTU); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4469 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4470 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4471 | |
| 4472 | crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4473 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4474 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4475 | |
| 4476 | crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4477 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4478 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4479 | |
Murilo Fossa Vicentini | 6052d5e | 2017-04-21 15:38:46 -0400 | [diff] [blame] | 4480 | crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); |
Murilo Fossa Vicentini | 6052d5e | 2017-04-21 15:38:46 -0400 | [diff] [blame] | 4481 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4482 | cap_reqs--; |
Murilo Fossa Vicentini | 6052d5e | 2017-04-21 15:38:46 -0400 | [diff] [blame] | 4483 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4484 | crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4485 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4486 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4487 | |
| 4488 | crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4489 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4490 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4491 | |
| 4492 | crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4493 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4494 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4495 | |
| 4496 | crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4497 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4498 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4499 | |
| 4500 | crq.query_capability.capability = |
| 4501 | cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4502 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4503 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4504 | |
| 4505 | crq.query_capability.capability = |
| 4506 | cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4507 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4508 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4509 | |
| 4510 | crq.query_capability.capability = |
| 4511 | cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4512 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4513 | cap_reqs--; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4514 | |
| 4515 | crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4516 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4517 | ibmvnic_send_crq(adapter, &crq); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4518 | cap_reqs--; |
| 4519 | |
| 4520 | /* Keep at end to catch any discrepancy between expected and actual |
| 4521 | * CRQs sent. |
| 4522 | */ |
| 4523 | WARN_ON(cap_reqs != 0); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4524 | } |
| 4525 | |
Lijun Pan | 16e811f | 2020-09-27 20:13:29 -0500 | [diff] [blame] | 4526 | static void send_query_ip_offload(struct ibmvnic_adapter *adapter) |
| 4527 | { |
| 4528 | int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); |
| 4529 | struct device *dev = &adapter->vdev->dev; |
| 4530 | union ibmvnic_crq crq; |
| 4531 | |
| 4532 | adapter->ip_offload_tok = |
| 4533 | dma_map_single(dev, |
| 4534 | &adapter->ip_offload_buf, |
| 4535 | buf_sz, |
| 4536 | DMA_FROM_DEVICE); |
| 4537 | |
| 4538 | if (dma_mapping_error(dev, adapter->ip_offload_tok)) { |
| 4539 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
| 4540 | dev_err(dev, "Couldn't map offload buffer\n"); |
| 4541 | return; |
| 4542 | } |
| 4543 | |
| 4544 | memset(&crq, 0, sizeof(crq)); |
| 4545 | crq.query_ip_offload.first = IBMVNIC_CRQ_CMD; |
| 4546 | crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; |
| 4547 | crq.query_ip_offload.len = cpu_to_be32(buf_sz); |
| 4548 | crq.query_ip_offload.ioba = |
| 4549 | cpu_to_be32(adapter->ip_offload_tok); |
| 4550 | |
| 4551 | ibmvnic_send_crq(adapter, &crq); |
| 4552 | } |
| 4553 | |
Lijun Pan | 46899bd | 2020-09-27 20:13:30 -0500 | [diff] [blame] | 4554 | static void send_control_ip_offload(struct ibmvnic_adapter *adapter) |
| 4555 | { |
| 4556 | struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; |
| 4557 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; |
| 4558 | struct device *dev = &adapter->vdev->dev; |
| 4559 | netdev_features_t old_hw_features = 0; |
| 4560 | union ibmvnic_crq crq; |
| 4561 | |
| 4562 | adapter->ip_offload_ctrl_tok = |
| 4563 | dma_map_single(dev, |
| 4564 | ctrl_buf, |
| 4565 | sizeof(adapter->ip_offload_ctrl), |
| 4566 | DMA_TO_DEVICE); |
| 4567 | |
| 4568 | if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { |
| 4569 | dev_err(dev, "Couldn't map ip offload control buffer\n"); |
| 4570 | return; |
| 4571 | } |
| 4572 | |
| 4573 | ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); |
| 4574 | ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); |
| 4575 | ctrl_buf->ipv4_chksum = buf->ipv4_chksum; |
| 4576 | ctrl_buf->ipv6_chksum = buf->ipv6_chksum; |
| 4577 | ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; |
| 4578 | ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; |
| 4579 | ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; |
| 4580 | ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; |
| 4581 | ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; |
| 4582 | ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; |
| 4583 | |
| 4584 | /* large_rx disabled for now, additional features needed */ |
| 4585 | ctrl_buf->large_rx_ipv4 = 0; |
| 4586 | ctrl_buf->large_rx_ipv6 = 0; |
| 4587 | |
| 4588 | if (adapter->state != VNIC_PROBING) { |
| 4589 | old_hw_features = adapter->netdev->hw_features; |
| 4590 | adapter->netdev->hw_features = 0; |
| 4591 | } |
| 4592 | |
| 4593 | adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; |
| 4594 | |
| 4595 | if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) |
| 4596 | adapter->netdev->hw_features |= NETIF_F_IP_CSUM; |
| 4597 | |
| 4598 | if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) |
| 4599 | adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; |
| 4600 | |
| 4601 | if ((adapter->netdev->features & |
| 4602 | (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) |
| 4603 | adapter->netdev->hw_features |= NETIF_F_RXCSUM; |
| 4604 | |
| 4605 | if (buf->large_tx_ipv4) |
| 4606 | adapter->netdev->hw_features |= NETIF_F_TSO; |
| 4607 | if (buf->large_tx_ipv6) |
| 4608 | adapter->netdev->hw_features |= NETIF_F_TSO6; |
| 4609 | |
| 4610 | if (adapter->state == VNIC_PROBING) { |
| 4611 | adapter->netdev->features |= adapter->netdev->hw_features; |
| 4612 | } else if (old_hw_features != adapter->netdev->hw_features) { |
| 4613 | netdev_features_t tmp = 0; |
| 4614 | |
| 4615 | /* disable features no longer supported */ |
| 4616 | adapter->netdev->features &= adapter->netdev->hw_features; |
| 4617 | /* turn on features now supported if previously enabled */ |
| 4618 | tmp = (old_hw_features ^ adapter->netdev->hw_features) & |
| 4619 | adapter->netdev->hw_features; |
| 4620 | adapter->netdev->features |= |
| 4621 | tmp & adapter->netdev->wanted_features; |
| 4622 | } |
| 4623 | |
| 4624 | memset(&crq, 0, sizeof(crq)); |
| 4625 | crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; |
| 4626 | crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; |
| 4627 | crq.control_ip_offload.len = |
| 4628 | cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); |
| 4629 | crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); |
| 4630 | ibmvnic_send_crq(adapter, &crq); |
| 4631 | } |
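/* Editor's worked example for the hw_features reconciliation above,
 * under assumed flag values (not taken from a real adapter). Suppose
 * old_hw_features contained NETIF_F_TSO, the refreshed query buffer no
 * longer advertises large_tx_ipv4, but checksum offload is newly
 * advertised:
 *
 *   features &= hw_features           -> TSO drops out of the active set
 *   tmp = (old ^ new) & new           -> only newly-gained bits survive
 *   features |= tmp & wanted_features -> gained bits are re-enabled only
 *                                        if the stack/user wanted them
 */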
| 4632 | |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 4633 | static void handle_vpd_size_rsp(union ibmvnic_crq *crq, |
| 4634 | struct ibmvnic_adapter *adapter) |
| 4635 | { |
| 4636 | struct device *dev = &adapter->vdev->dev; |
| 4637 | |
| 4638 | if (crq->get_vpd_size_rsp.rc.code) { |
| 4639 | dev_err(dev, "Error retrieving VPD size, rc=%x\n", |
| 4640 | crq->get_vpd_size_rsp.rc.code); |
| 4641 | complete(&adapter->fw_done); |
| 4642 | return; |
| 4643 | } |
| 4644 | |
| 4645 | adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); |
| 4646 | complete(&adapter->fw_done); |
| 4647 | } |
| 4648 | |
| 4649 | static void handle_vpd_rsp(union ibmvnic_crq *crq, |
| 4650 | struct ibmvnic_adapter *adapter) |
| 4651 | { |
| 4652 | struct device *dev = &adapter->vdev->dev; |
Desnes Augusto Nunes do Rosario | 21a2545 | 2018-02-05 14:33:55 -0200 | [diff] [blame] | 4653 | unsigned char *substr = NULL; |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 4654 | u8 fw_level_len = 0; |
| 4655 | |
| 4656 | memset(adapter->fw_version, 0, sizeof(adapter->fw_version)); |
| 4657 | |
| 4658 | dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, |
| 4659 | DMA_FROM_DEVICE); |
| 4660 | |
| 4661 | if (crq->get_vpd_rsp.rc.code) { |
| 4662 | dev_err(dev, "Error retrieving VPD from device, rc=%x\n", |
| 4663 | crq->get_vpd_rsp.rc.code); |
| 4664 | goto complete; |
| 4665 | } |
| 4666 | |
| 4667 | /* get the position of the firmware version info |
| 4668 | * located after the ASCII 'RM' substring in the buffer |
| 4669 | */ |
| 4670 | substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); |
| 4671 | if (!substr) { |
Desnes Augusto Nunes do Rosario | a107311 | 2018-02-01 16:04:30 -0200 | [diff] [blame] | 4672 | dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 4673 | goto complete; |
| 4674 | } |
| 4675 | |
| 4676 | /* get length of firmware level ASCII substring */ |
| 4677 | if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { |
| 4678 | fw_level_len = *(substr + 2); |
| 4679 | } else { |
| 4680 | dev_info(dev, "Length of FW substr extends past VPD buffer\n"); |
| 4681 | goto complete; |
| 4682 | } |
| 4683 | |
| 4684 | /* copy firmware version string from vpd into adapter */ |
| 4685 | if ((substr + 3 + fw_level_len) < |
| 4686 | (adapter->vpd->buff + adapter->vpd->len)) { |
Desnes Augusto Nunes do Rosario | 21a2545 | 2018-02-05 14:33:55 -0200 | [diff] [blame] | 4687 | strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 4688 | } else { |
| 4689 | dev_info(dev, "FW substr extends past VPD buffer\n"); |
| 4690 | } |
| 4691 | |
| 4692 | complete: |
Desnes Augusto Nunes do Rosario | 21a2545 | 2018-02-05 14:33:55 -0200 | [diff] [blame] | 4693 | if (adapter->fw_version[0] == '\0') |
Lijun Pan | 0b217d3 | 2021-06-11 13:33:53 -0500 | [diff] [blame] | 4694 | strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 4695 | complete(&adapter->fw_done); |
| 4696 | } |
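/* Editor's sketch of the VPD bytes parsed by handle_vpd_rsp() above
 * (layout as implied by the pointer arithmetic, not from a spec):
 *
 *   substr:  'R'  'M'  <len>  <len ASCII firmware-level bytes>
 *   index:    0    1     2     3 ... 3 + len - 1
 *
 * Both the length byte (substr + 2) and the end of the string
 * (substr + 3 + fw_level_len) are bounds-checked against
 * adapter->vpd->buff + adapter->vpd->len before the copy.
 */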
| 4697 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4698 | static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) |
| 4699 | { |
| 4700 | struct device *dev = &adapter->vdev->dev; |
| 4701 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4702 | int i; |
| 4703 | |
| 4704 | dma_unmap_single(dev, adapter->ip_offload_tok, |
| 4705 | sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); |
| 4706 | |
| 4707 | netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); |
| 4708 | for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) |
| 4709 | netdev_dbg(adapter->netdev, "%016lx\n", |
Lijun Pan | 429aa36 | 2021-02-11 00:43:18 -0600 | [diff] [blame] | 4710 | ((unsigned long *)(buf))[i]); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4711 | |
| 4712 | netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); |
| 4713 | netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); |
| 4714 | netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", |
| 4715 | buf->tcp_ipv4_chksum); |
| 4716 | netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", |
| 4717 | buf->tcp_ipv6_chksum); |
| 4718 | netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", |
| 4719 | buf->udp_ipv4_chksum); |
| 4720 | netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", |
| 4721 | buf->udp_ipv6_chksum); |
| 4722 | netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", |
| 4723 | buf->large_tx_ipv4); |
| 4724 | netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", |
| 4725 | buf->large_tx_ipv6); |
| 4726 | netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", |
| 4727 | buf->large_rx_ipv4); |
| 4728 | netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", |
| 4729 | buf->large_rx_ipv6); |
| 4730 | netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", |
| 4731 | buf->max_ipv4_header_size); |
| 4732 | netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", |
| 4733 | buf->max_ipv6_header_size); |
| 4734 | netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", |
| 4735 | buf->max_tcp_header_size); |
| 4736 | netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", |
| 4737 | buf->max_udp_header_size); |
| 4738 | netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", |
| 4739 | buf->max_large_tx_size); |
| 4740 | netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", |
| 4741 | buf->max_large_rx_size); |
| 4742 | netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", |
| 4743 | buf->ipv6_extension_header); |
| 4744 | netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", |
| 4745 | buf->tcp_pseudosum_req); |
| 4746 | netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", |
| 4747 | buf->num_ipv6_ext_headers); |
| 4748 | netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", |
| 4749 | buf->off_ipv6_ext_headers); |
| 4750 | |
Lijun Pan | 46899bd | 2020-09-27 20:13:30 -0500 | [diff] [blame] | 4751 | send_control_ip_offload(adapter); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4752 | } |
| 4753 | |
Thomas Falcon | c9008d3 | 2018-08-06 21:39:59 -0500 | [diff] [blame] | 4754 | static const char *ibmvnic_fw_err_cause(u16 cause) |
| 4755 | { |
| 4756 | switch (cause) { |
| 4757 | case ADAPTER_PROBLEM: |
| 4758 | return "adapter problem"; |
| 4759 | case BUS_PROBLEM: |
| 4760 | return "bus problem"; |
| 4761 | case FW_PROBLEM: |
| 4762 | return "firmware problem"; |
| 4763 | case DD_PROBLEM: |
| 4764 | return "device driver problem"; |
| 4765 | case EEH_RECOVERY: |
| 4766 | return "EEH recovery"; |
| 4767 | case FW_UPDATED: |
| 4768 | return "firmware updated"; |
| 4769 | case LOW_MEMORY: |
| 4770 | return "low Memory"; |
| 4771 | default: |
| 4772 | return "unknown"; |
| 4773 | } |
| 4774 | } |
| 4775 | |
Nathan Fontenot | 2f9de9b | 2017-04-21 15:38:52 -0400 | [diff] [blame] | 4776 | static void handle_error_indication(union ibmvnic_crq *crq, |
| 4777 | struct ibmvnic_adapter *adapter) |
| 4778 | { |
| 4779 | struct device *dev = &adapter->vdev->dev; |
Thomas Falcon | c9008d3 | 2018-08-06 21:39:59 -0500 | [diff] [blame] | 4780 | u16 cause; |
Nathan Fontenot | 2f9de9b | 2017-04-21 15:38:52 -0400 | [diff] [blame] | 4781 | |
Thomas Falcon | c9008d3 | 2018-08-06 21:39:59 -0500 | [diff] [blame] | 4782 | cause = be16_to_cpu(crq->error_indication.error_cause); |
| 4783 | |
| 4784 | dev_warn_ratelimited(dev, |
| 4785 | "Firmware reports %serror, cause: %s. Starting recovery...\n", |
| 4786 | crq->error_indication.flags |
| 4787 | & IBMVNIC_FATAL_ERROR ? "FATAL " : "", |
| 4788 | ibmvnic_fw_err_cause(cause)); |
Nathan Fontenot | 2f9de9b | 2017-04-21 15:38:52 -0400 | [diff] [blame] | 4789 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 4790 | if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) |
| 4791 | ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
John Allen | 8cb31cf | 2017-05-26 10:30:37 -0400 | [diff] [blame] | 4792 | else |
| 4793 | ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4794 | } |
| 4795 | |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 4796 | static int handle_change_mac_rsp(union ibmvnic_crq *crq, |
| 4797 | struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4798 | { |
| 4799 | struct net_device *netdev = adapter->netdev; |
| 4800 | struct device *dev = &adapter->vdev->dev; |
| 4801 | long rc; |
| 4802 | |
| 4803 | rc = crq->change_mac_addr_rsp.rc.code; |
| 4804 | if (rc) { |
| 4805 | dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 4806 | goto out; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4807 | } |
Lijun Pan | d9b0e59 | 2020-10-20 17:39:19 -0500 | [diff] [blame] | 4808 | /* crq->change_mac_addr.mac_addr is the requested one |
| 4809 | * crq->change_mac_addr_rsp.mac_addr is the returned valid one. |
| 4810 | */ |
Jakub Kicinski | f3956eb | 2021-10-01 14:32:23 -0700 | [diff] [blame] | 4811 | eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); |
Lijun Pan | d9b0e59 | 2020-10-20 17:39:19 -0500 | [diff] [blame] | 4812 | ether_addr_copy(adapter->mac_addr, |
| 4813 | &crq->change_mac_addr_rsp.mac_addr[0]); |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 4814 | out: |
| 4815 | complete(&adapter->fw_done); |
| 4816 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4817 | } |
| 4818 | |
| 4819 | static void handle_request_cap_rsp(union ibmvnic_crq *crq, |
| 4820 | struct ibmvnic_adapter *adapter) |
| 4821 | { |
| 4822 | struct device *dev = &adapter->vdev->dev; |
| 4823 | u64 *req_value; |
| 4824 | char *name; |
| 4825 | |
Thomas Falcon | 901e040 | 2017-02-15 12:17:59 -0600 | [diff] [blame] | 4826 | atomic_dec(&adapter->running_cap_crqs); |
Sukadev Bhattiprolu | 151b6a5c | 2022-01-21 18:59:19 -0800 | [diff] [blame] | 4827 | netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", |
| 4828 | atomic_read(&adapter->running_cap_crqs)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4829 | switch (be16_to_cpu(crq->request_capability_rsp.capability)) { |
| 4830 | case REQ_TX_QUEUES: |
| 4831 | req_value = &adapter->req_tx_queues; |
| 4832 | name = "tx"; |
| 4833 | break; |
| 4834 | case REQ_RX_QUEUES: |
| 4835 | req_value = &adapter->req_rx_queues; |
| 4836 | name = "rx"; |
| 4837 | break; |
| 4838 | case REQ_RX_ADD_QUEUES: |
| 4839 | req_value = &adapter->req_rx_add_queues; |
| 4840 | name = "rx_add"; |
| 4841 | break; |
| 4842 | case REQ_TX_ENTRIES_PER_SUBCRQ: |
| 4843 | req_value = &adapter->req_tx_entries_per_subcrq; |
| 4844 | name = "tx_entries_per_subcrq"; |
| 4845 | break; |
| 4846 | case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: |
| 4847 | req_value = &adapter->req_rx_add_entries_per_subcrq; |
| 4848 | name = "rx_add_entries_per_subcrq"; |
| 4849 | break; |
| 4850 | case REQ_MTU: |
| 4851 | req_value = &adapter->req_mtu; |
| 4852 | name = "mtu"; |
| 4853 | break; |
| 4854 | case PROMISC_REQUESTED: |
| 4855 | req_value = &adapter->promisc; |
| 4856 | name = "promisc"; |
| 4857 | break; |
| 4858 | default: |
| 4859 | dev_err(dev, "Got invalid cap request rsp %d\n", |
| 4860 | be16_to_cpu(crq->request_capability.capability)); |
| 4861 | return; |
| 4862 | } |
| 4863 | |
| 4864 | switch (crq->request_capability_rsp.rc.code) { |
| 4865 | case SUCCESS: |
| 4866 | break; |
| 4867 | case PARTIALSUCCESS: |
| 4868 | dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", |
| 4869 | *req_value, |
Lijun Pan | 914789a | 2021-02-11 00:43:21 -0600 | [diff] [blame] | 4870 | (long)be64_to_cpu(crq->request_capability_rsp.number), |
| 4871 | name); |
John Allen | e791380 | 2018-01-18 16:27:12 -0600 | [diff] [blame] | 4872 | |
| 4873 | if (be16_to_cpu(crq->request_capability_rsp.capability) == |
| 4874 | REQ_MTU) { |
| 4875 | pr_err("mtu of %llu is not supported. Reverting.\n", |
| 4876 | *req_value); |
| 4877 | *req_value = adapter->fallback.mtu; |
| 4878 | } else { |
| 4879 | *req_value = |
| 4880 | be64_to_cpu(crq->request_capability_rsp.number); |
| 4881 | } |
| 4882 | |
Lijun Pan | 09081b9 | 2020-09-27 20:13:27 -0500 | [diff] [blame] | 4883 | send_request_cap(adapter, 1); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4884 | return; |
| 4885 | default: |
| 4886 | dev_err(dev, "Error %d in request cap rsp\n", |
| 4887 | crq->request_capability_rsp.rc.code); |
| 4888 | return; |
| 4889 | } |
| 4890 | |
| 4891 | /* Done receiving requested capabilities, query IP offload support */ |
Sukadev Bhattiprolu | 3a5d9db | 2022-01-21 18:59:21 -0800 | [diff] [blame] | 4892 | if (atomic_read(&adapter->running_cap_crqs) == 0) |
Lijun Pan | 16e811f | 2020-09-27 20:13:29 -0500 | [diff] [blame] | 4893 | send_query_ip_offload(adapter); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4894 | } |
| 4895 | |
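| | /* Handle the login response: unmap the login buffers, check for a |
| | * server-reported error or a pending failover, validate the response |
| | * against the request, record the rx/tx sub-CRQ handles and buffer |
| | * size advertised by the server, then complete init_done. |
| | */ |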
| 4896 | static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, |
| 4897 | struct ibmvnic_adapter *adapter) |
| 4898 | { |
| 4899 | struct device *dev = &adapter->vdev->dev; |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 4900 | struct net_device *netdev = adapter->netdev; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4901 | struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; |
| 4902 | struct ibmvnic_login_buffer *login = adapter->login_buf; |
Cristobal Forno | f3ae59c | 2020-08-19 13:16:23 -0500 | [diff] [blame] | 4903 | u64 *tx_handle_array; |
| 4904 | u64 *rx_handle_array; |
| 4905 | int num_tx_pools; |
| 4906 | int num_rx_pools; |
Thomas Falcon | 507ebe6 | 2020-08-21 13:39:01 -0500 | [diff] [blame] | 4907 | u64 *size_array; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4908 | int i; |
| 4909 | |
Sukadev Bhattiprolu | 76cdc5c | 2020-11-25 18:04:29 -0600 | [diff] [blame] | 4910 | /* CHECK: Test/set of login_pending does not need to be atomic |
| 4911 | * because only ibmvnic_tasklet tests/clears this. |
| 4912 | */ |
| 4913 | if (!adapter->login_pending) { |
| 4914 | netdev_warn(netdev, "Ignoring unexpected login response\n"); |
| 4915 | return 0; |
| 4916 | } |
| 4917 | adapter->login_pending = false; |
| 4918 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4919 | dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, |
Thomas Falcon | 37e40fa | 2018-04-06 18:37:02 -0500 | [diff] [blame] | 4920 | DMA_TO_DEVICE); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4921 | dma_unmap_single(dev, adapter->login_rsp_buf_token, |
Thomas Falcon | 37e40fa | 2018-04-06 18:37:02 -0500 | [diff] [blame] | 4922 | adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4923 | |
John Allen | 498cd8e | 2016-04-06 11:49:55 -0500 | [diff] [blame] | 4924 | /* If the number of queues requested can't be allocated by the |
| 4925 | * server, the login response will return with code 1. We will need |
| 4926 | * to resend the login buffer with fewer queues requested. |
| 4927 | */ |
| 4928 | if (login_rsp_crq->generic.rc.code) { |
Nathan Fontenot | 64d92aa | 2018-04-11 10:09:32 -0500 | [diff] [blame] | 4929 | adapter->init_done_rc = login_rsp_crq->generic.rc.code; |
John Allen | 498cd8e | 2016-04-06 11:49:55 -0500 | [diff] [blame] | 4930 | complete(&adapter->init_done); |
| 4931 | return 0; |
| 4932 | } |
| 4933 | |
Sukadev Bhattiprolu | d437f5a | 2021-09-07 22:07:03 -0700 | [diff] [blame] | 4934 | if (adapter->failover_pending) { |
| 4935 | adapter->init_done_rc = -EAGAIN; |
| 4936 | netdev_dbg(netdev, "Failover pending, ignoring login response\n"); |
| 4937 | complete(&adapter->init_done); |
| 4938 | /* login response buffer will be released on reset */ |
| 4939 | return 0; |
| 4940 | } |
| 4941 | |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 4942 | netdev->mtu = adapter->req_mtu - ETH_HLEN; |
| 4943 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4944 | netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); |
| 4945 | for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { |
| 4946 | netdev_dbg(adapter->netdev, "%016lx\n", |
Lijun Pan | 429aa36 | 2021-02-11 00:43:18 -0600 | [diff] [blame] | 4947 | ((unsigned long *)(adapter->login_rsp_buf))[i]); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4948 | } |
| 4949 | |
| 4950 | /* Sanity checks */ |
| 4951 | if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || |
| 4952 | (be32_to_cpu(login->num_rxcomp_subcrqs) * |
| 4953 | adapter->req_rx_add_queues != |
| 4954 | be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { |
| 4955 | dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); |
Dany Madden | 31d6b40 | 2020-11-25 18:04:24 -0600 | [diff] [blame] | 4956 | ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4957 | return -EIO; |
| 4958 | } |
Thomas Falcon | 507ebe6 | 2020-08-21 13:39:01 -0500 | [diff] [blame] | 4959 | size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
| 4960 | be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); |
| 4961 | /* variable buffer sizes are not supported, so just read the |
| 4962 | * first entry. |
| 4963 | */ |
| 4964 | adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); |
Cristobal Forno | f3ae59c | 2020-08-19 13:16:23 -0500 | [diff] [blame] | 4965 | |
| 4966 | num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); |
| 4967 | num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
| 4968 | |
| 4969 | tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
| 4970 | be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); |
| 4971 | rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
| 4972 | be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); |
| 4973 | |
| 4974 | for (i = 0; i < num_tx_pools; i++) |
| 4975 | adapter->tx_scrq[i]->handle = tx_handle_array[i]; |
| 4976 | |
| 4977 | for (i = 0; i < num_rx_pools; i++) |
| 4978 | adapter->rx_scrq[i]->handle = rx_handle_array[i]; |
| 4979 | |
Thomas Falcon | 507ebe6 | 2020-08-21 13:39:01 -0500 | [diff] [blame] | 4980 | adapter->num_active_tx_scrqs = num_tx_pools; |
| 4981 | adapter->num_active_rx_scrqs = num_rx_pools; |
Cristobal Forno | f3ae59c | 2020-08-19 13:16:23 -0500 | [diff] [blame] | 4982 | release_login_rsp_buffer(adapter); |
Thomas Falcon | a2c0f03 | 2018-02-21 18:18:30 -0600 | [diff] [blame] | 4983 | release_login_buffer(adapter); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4984 | complete(&adapter->init_done); |
| 4985 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4986 | return 0; |
| 4987 | } |
| 4988 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 4989 | static void handle_request_unmap_rsp(union ibmvnic_crq *crq, |
| 4990 | struct ibmvnic_adapter *adapter) |
| 4991 | { |
| 4992 | struct device *dev = &adapter->vdev->dev; |
| 4993 | long rc; |
| 4994 | |
| 4995 | rc = crq->request_unmap_rsp.rc.code; |
| 4996 | if (rc) |
| 4997 | dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); |
| 4998 | } |
| 4999 | |
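| | /* Log the page size and total/free page counts from a QUERY_MAP response. */ |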
| 5000 | static void handle_query_map_rsp(union ibmvnic_crq *crq, |
| 5001 | struct ibmvnic_adapter *adapter) |
| 5002 | { |
| 5003 | struct net_device *netdev = adapter->netdev; |
| 5004 | struct device *dev = &adapter->vdev->dev; |
| 5005 | long rc; |
| 5006 | |
| 5007 | rc = crq->query_map_rsp.rc.code; |
| 5008 | if (rc) { |
| 5009 | dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); |
| 5010 | return; |
| 5011 | } |
Sukadev Bhattiprolu | 0f2bf31 | 2021-09-14 20:52:52 -0700 | [diff] [blame] | 5012 | netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", |
| 5013 | crq->query_map_rsp.page_size, |
| 5014 | __be32_to_cpu(crq->query_map_rsp.tot_pages), |
| 5015 | __be32_to_cpu(crq->query_map_rsp.free_pages)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5016 | } |
| 5017 | |
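| | /* Record a single queried capability in the adapter. Once the last |
| | * outstanding query completes, send the capability requests. |
| | */ |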
| 5018 | static void handle_query_cap_rsp(union ibmvnic_crq *crq, |
| 5019 | struct ibmvnic_adapter *adapter) |
| 5020 | { |
| 5021 | struct net_device *netdev = adapter->netdev; |
| 5022 | struct device *dev = &adapter->vdev->dev; |
| 5023 | long rc; |
| 5024 | |
Thomas Falcon | 901e040 | 2017-02-15 12:17:59 -0600 | [diff] [blame] | 5025 | atomic_dec(&adapter->running_cap_crqs); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5026 | netdev_dbg(netdev, "Outstanding queries: %d\n", |
Thomas Falcon | 901e040 | 2017-02-15 12:17:59 -0600 | [diff] [blame] | 5027 | atomic_read(&adapter->running_cap_crqs)); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5028 | rc = crq->query_capability.rc.code; |
| 5029 | if (rc) { |
| 5030 | dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); |
| 5031 | goto out; |
| 5032 | } |
| 5033 | |
| 5034 | switch (be16_to_cpu(crq->query_capability.capability)) { |
| 5035 | case MIN_TX_QUEUES: |
| 5036 | adapter->min_tx_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5037 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5038 | netdev_dbg(netdev, "min_tx_queues = %lld\n", |
| 5039 | adapter->min_tx_queues); |
| 5040 | break; |
| 5041 | case MIN_RX_QUEUES: |
| 5042 | adapter->min_rx_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5043 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5044 | netdev_dbg(netdev, "min_rx_queues = %lld\n", |
| 5045 | adapter->min_rx_queues); |
| 5046 | break; |
| 5047 | case MIN_RX_ADD_QUEUES: |
| 5048 | adapter->min_rx_add_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5049 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5050 | netdev_dbg(netdev, "min_rx_add_queues = %lld\n", |
| 5051 | adapter->min_rx_add_queues); |
| 5052 | break; |
| 5053 | case MAX_TX_QUEUES: |
| 5054 | adapter->max_tx_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5055 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5056 | netdev_dbg(netdev, "max_tx_queues = %lld\n", |
| 5057 | adapter->max_tx_queues); |
| 5058 | break; |
| 5059 | case MAX_RX_QUEUES: |
| 5060 | adapter->max_rx_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5061 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5062 | netdev_dbg(netdev, "max_rx_queues = %lld\n", |
| 5063 | adapter->max_rx_queues); |
| 5064 | break; |
| 5065 | case MAX_RX_ADD_QUEUES: |
| 5066 | adapter->max_rx_add_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5067 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5068 | netdev_dbg(netdev, "max_rx_add_queues = %lld\n", |
| 5069 | adapter->max_rx_add_queues); |
| 5070 | break; |
| 5071 | case MIN_TX_ENTRIES_PER_SUBCRQ: |
| 5072 | adapter->min_tx_entries_per_subcrq = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5073 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5074 | netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", |
| 5075 | adapter->min_tx_entries_per_subcrq); |
| 5076 | break; |
| 5077 | case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: |
| 5078 | adapter->min_rx_add_entries_per_subcrq = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5079 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5080 | netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", |
| 5081 | adapter->min_rx_add_entries_per_subcrq); |
| 5082 | break; |
| 5083 | case MAX_TX_ENTRIES_PER_SUBCRQ: |
| 5084 | adapter->max_tx_entries_per_subcrq = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5085 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5086 | netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", |
| 5087 | adapter->max_tx_entries_per_subcrq); |
| 5088 | break; |
| 5089 | case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: |
| 5090 | adapter->max_rx_add_entries_per_subcrq = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5091 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5092 | netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", |
| 5093 | adapter->max_rx_add_entries_per_subcrq); |
| 5094 | break; |
| 5095 | case TCP_IP_OFFLOAD: |
| 5096 | adapter->tcp_ip_offload = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5097 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5098 | netdev_dbg(netdev, "tcp_ip_offload = %lld\n", |
| 5099 | adapter->tcp_ip_offload); |
| 5100 | break; |
| 5101 | case PROMISC_SUPPORTED: |
| 5102 | adapter->promisc_supported = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5103 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5104 | netdev_dbg(netdev, "promisc_supported = %lld\n", |
| 5105 | adapter->promisc_supported); |
| 5106 | break; |
| 5107 | case MIN_MTU: |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5108 | adapter->min_mtu = be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | f39f0d1 | 2017-02-14 10:22:59 -0600 | [diff] [blame] | 5109 | netdev->min_mtu = adapter->min_mtu - ETH_HLEN; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5110 | netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); |
| 5111 | break; |
| 5112 | case MAX_MTU: |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5113 | adapter->max_mtu = be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | f39f0d1 | 2017-02-14 10:22:59 -0600 | [diff] [blame] | 5114 | netdev->max_mtu = adapter->max_mtu - ETH_HLEN; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5115 | netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); |
| 5116 | break; |
| 5117 | case MAX_MULTICAST_FILTERS: |
| 5118 | adapter->max_multicast_filters = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5119 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5120 | netdev_dbg(netdev, "max_multicast_filters = %lld\n", |
| 5121 | adapter->max_multicast_filters); |
| 5122 | break; |
| 5123 | case VLAN_HEADER_INSERTION: |
| 5124 | adapter->vlan_header_insertion = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5125 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5126 | if (adapter->vlan_header_insertion) |
| 5127 | netdev->features |= NETIF_F_HW_VLAN_STAG_TX; |
| 5128 | netdev_dbg(netdev, "vlan_header_insertion = %lld\n", |
| 5129 | adapter->vlan_header_insertion); |
| 5130 | break; |
Murilo Fossa Vicentini | 6052d5e | 2017-04-21 15:38:46 -0400 | [diff] [blame] | 5131 | case RX_VLAN_HEADER_INSERTION: |
| 5132 | adapter->rx_vlan_header_insertion = |
| 5133 | be64_to_cpu(crq->query_capability.number); |
| 5134 | netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", |
| 5135 | adapter->rx_vlan_header_insertion); |
| 5136 | break; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5137 | case MAX_TX_SG_ENTRIES: |
| 5138 | adapter->max_tx_sg_entries = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5139 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5140 | netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", |
| 5141 | adapter->max_tx_sg_entries); |
| 5142 | break; |
| 5143 | case RX_SG_SUPPORTED: |
| 5144 | adapter->rx_sg_supported = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5145 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5146 | netdev_dbg(netdev, "rx_sg_supported = %lld\n", |
| 5147 | adapter->rx_sg_supported); |
| 5148 | break; |
| 5149 | case OPT_TX_COMP_SUB_QUEUES: |
| 5150 | adapter->opt_tx_comp_sub_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5151 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5152 | netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", |
| 5153 | adapter->opt_tx_comp_sub_queues); |
| 5154 | break; |
| 5155 | case OPT_RX_COMP_QUEUES: |
| 5156 | adapter->opt_rx_comp_queues = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5157 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5158 | netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", |
| 5159 | adapter->opt_rx_comp_queues); |
| 5160 | break; |
| 5161 | case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: |
| 5162 | adapter->opt_rx_bufadd_q_per_rx_comp_q = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5163 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5164 | netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", |
| 5165 | adapter->opt_rx_bufadd_q_per_rx_comp_q); |
| 5166 | break; |
| 5167 | case OPT_TX_ENTRIES_PER_SUBCRQ: |
| 5168 | adapter->opt_tx_entries_per_subcrq = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5169 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5170 | netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", |
| 5171 | adapter->opt_tx_entries_per_subcrq); |
| 5172 | break; |
| 5173 | case OPT_RXBA_ENTRIES_PER_SUBCRQ: |
| 5174 | adapter->opt_rxba_entries_per_subcrq = |
Thomas Falcon | de89e85 | 2016-03-01 10:20:09 -0600 | [diff] [blame] | 5175 | be64_to_cpu(crq->query_capability.number); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5176 | netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", |
| 5177 | adapter->opt_rxba_entries_per_subcrq); |
| 5178 | break; |
| 5179 | case TX_RX_DESC_REQ: |
| 5180 | adapter->tx_rx_desc_req = crq->query_capability.number; |
| 5181 | netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", |
| 5182 | adapter->tx_rx_desc_req); |
| 5183 | break; |
| 5184 | |
| 5185 | default: |
| 5186 | netdev_err(netdev, "Got invalid cap rsp %d\n", |
| 5187 | crq->query_capability.capability); |
| 5188 | } |
| 5189 | |
| 5190 | out: |
Sukadev Bhattiprolu | 3a5d9db | 2022-01-21 18:59:21 -0800 | [diff] [blame] | 5191 | if (atomic_read(&adapter->running_cap_crqs) == 0) |
Lijun Pan | 09081b9 | 2020-09-27 20:13:27 -0500 | [diff] [blame] | 5192 | send_request_cap(adapter, 0); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5193 | } |
| 5194 | |
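| | /* Send a QUERY_PHYS_PARMS CRQ and, under fw_lock, wait up to 10 seconds |
| | * for the firmware response. |
| | */ |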
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5195 | static int send_query_phys_parms(struct ibmvnic_adapter *adapter) |
| 5196 | { |
| 5197 | union ibmvnic_crq crq; |
| 5198 | int rc; |
| 5199 | |
| 5200 | memset(&crq, 0, sizeof(crq)); |
| 5201 | crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; |
| 5202 | crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5203 | |
| 5204 | mutex_lock(&adapter->fw_lock); |
| 5205 | adapter->fw_done_rc = 0; |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 5206 | reinit_completion(&adapter->fw_done); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5207 | |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5208 | rc = ibmvnic_send_crq(adapter, &crq); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5209 | if (rc) { |
| 5210 | mutex_unlock(&adapter->fw_lock); |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5211 | return rc; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5212 | } |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 5213 | |
| 5214 | rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5215 | if (rc) { |
| 5216 | mutex_unlock(&adapter->fw_lock); |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 5217 | return rc; |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5218 | } |
Thomas Falcon | 476d96c | 2019-11-25 17:12:55 -0600 | [diff] [blame] | 5219 | |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5220 | mutex_unlock(&adapter->fw_lock); |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5221 | return adapter->fw_done_rc ? -EIO : 0; |
| 5222 | } |
| 5223 | |
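| | /* Translate the link speed and duplex reported by QUERY_PHYS_PARMS into |
| | * generic SPEED_* / DUPLEX_* values. |
| | */ |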
| 5224 | static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, |
| 5225 | struct ibmvnic_adapter *adapter) |
| 5226 | { |
| 5227 | struct net_device *netdev = adapter->netdev; |
| 5228 | int rc; |
Murilo Fossa Vicentini | dd0f9d8 | 2019-09-16 11:50:37 -0300 | [diff] [blame] | 5229 | __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5230 | |
| 5231 | rc = crq->query_phys_parms_rsp.rc.code; |
| 5232 | if (rc) { |
| 5233 | netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); |
| 5234 | return rc; |
| 5235 | } |
Murilo Fossa Vicentini | dd0f9d8 | 2019-09-16 11:50:37 -0300 | [diff] [blame] | 5236 | switch (rspeed) { |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5237 | case IBMVNIC_10MBPS: |
| 5238 | adapter->speed = SPEED_10; |
| 5239 | break; |
| 5240 | case IBMVNIC_100MBPS: |
| 5241 | adapter->speed = SPEED_100; |
| 5242 | break; |
| 5243 | case IBMVNIC_1GBPS: |
| 5244 | adapter->speed = SPEED_1000; |
| 5245 | break; |
Lijun Pan | b9cd795 | 2020-09-27 19:06:25 -0500 | [diff] [blame] | 5246 | case IBMVNIC_10GBPS: |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5247 | adapter->speed = SPEED_10000; |
| 5248 | break; |
| 5249 | case IBMVNIC_25GBPS: |
| 5250 | adapter->speed = SPEED_25000; |
| 5251 | break; |
| 5252 | case IBMVNIC_40GBPS: |
| 5253 | adapter->speed = SPEED_40000; |
| 5254 | break; |
| 5255 | case IBMVNIC_50GBPS: |
| 5256 | adapter->speed = SPEED_50000; |
| 5257 | break; |
| 5258 | case IBMVNIC_100GBPS: |
| 5259 | adapter->speed = SPEED_100000; |
| 5260 | break; |
Lijun Pan | b9cd795 | 2020-09-27 19:06:25 -0500 | [diff] [blame] | 5261 | case IBMVNIC_200GBPS: |
| 5262 | adapter->speed = SPEED_200000; |
| 5263 | break; |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5264 | default: |
Murilo Fossa Vicentini | dd0f9d8 | 2019-09-16 11:50:37 -0300 | [diff] [blame] | 5265 | if (netif_carrier_ok(netdev)) |
| 5266 | netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5267 | adapter->speed = SPEED_UNKNOWN; |
| 5268 | } |
| 5269 | if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) |
| 5270 | adapter->duplex = DUPLEX_FULL; |
| 5271 | else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) |
| 5272 | adapter->duplex = DUPLEX_HALF; |
| 5273 | else |
| 5274 | adapter->duplex = DUPLEX_UNKNOWN; |
| 5275 | |
| 5276 | return rc; |
| 5277 | } |
| 5278 | |
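| | /* Dispatch a single CRQ message: initialization and transport events are |
| | * handled here directly, command responses are routed to their handlers. |
| | */ |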
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5279 | static void ibmvnic_handle_crq(union ibmvnic_crq *crq, |
| 5280 | struct ibmvnic_adapter *adapter) |
| 5281 | { |
| 5282 | struct ibmvnic_generic_crq *gen_crq = &crq->generic; |
| 5283 | struct net_device *netdev = adapter->netdev; |
| 5284 | struct device *dev = &adapter->vdev->dev; |
Murilo Fossa Vicentini | 993a82b | 2017-04-19 13:44:35 -0400 | [diff] [blame] | 5285 | u64 *u64_crq = (u64 *)crq; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5286 | long rc; |
| 5287 | |
| 5288 | netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", |
Lijun Pan | 429aa36 | 2021-02-11 00:43:18 -0600 | [diff] [blame] | 5289 | (unsigned long)cpu_to_be64(u64_crq[0]), |
| 5290 | (unsigned long)cpu_to_be64(u64_crq[1])); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5291 | switch (gen_crq->first) { |
| 5292 | case IBMVNIC_CRQ_INIT_RSP: |
| 5293 | switch (gen_crq->cmd) { |
| 5294 | case IBMVNIC_CRQ_INIT: |
| 5295 | dev_info(dev, "Partner initialized\n"); |
John Allen | 017892c1 | 2017-05-26 10:30:19 -0400 | [diff] [blame] | 5296 | adapter->from_passive_init = true; |
Sukadev Bhattiprolu | 76cdc5c | 2020-11-25 18:04:29 -0600 | [diff] [blame] | 5297 | /* Discard any stale login responses from prev reset. |
| 5298 | * CHECK: should we clear even on INIT_COMPLETE? |
| 5299 | */ |
| 5300 | adapter->login_pending = false; |
| 5301 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 5302 | if (adapter->state == VNIC_DOWN) |
| 5303 | rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); |
| 5304 | else |
| 5305 | rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); |
| 5306 | |
Sukadev Bhattiprolu | ef66a1e | 2021-02-02 21:08:02 -0800 | [diff] [blame] | 5307 | if (rc && rc != -EBUSY) { |
| 5308 | /* We were unable to schedule the failover |
| 5309 | * reset either because the adapter was still |
| 5310 | * probing (e.g. during kexec) or we could not |
| 5311 | * allocate memory. Clear the failover_pending |
| 5312 | * flag since no one else will. We ignore |
| 5313 | * EBUSY because it means either FAILOVER reset |
| 5314 | * is already scheduled or the adapter is |
| 5315 | * being removed. |
| 5316 | */ |
| 5317 | netdev_err(netdev, |
| 5318 | "Error %ld scheduling failover reset\n", |
| 5319 | rc); |
| 5320 | adapter->failover_pending = false; |
| 5321 | } |
Sukadev Bhattiprolu | 6b278c0 | 2021-10-29 15:03:16 -0700 | [diff] [blame] | 5322 | |
| 5323 | if (!completion_done(&adapter->init_done)) { |
| 5324 | complete(&adapter->init_done); |
| 5325 | if (!adapter->init_done_rc) |
| 5326 | adapter->init_done_rc = -EAGAIN; |
| 5327 | } |
| 5328 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5329 | break; |
| 5330 | case IBMVNIC_CRQ_INIT_COMPLETE: |
| 5331 | dev_info(dev, "Partner initialization complete\n"); |
Thomas Falcon | 5153698 | 2018-05-23 13:37:56 -0500 | [diff] [blame] | 5332 | adapter->crq.active = true; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5333 | send_version_xchg(adapter); |
| 5334 | break; |
| 5335 | default: |
| 5336 | dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); |
| 5337 | } |
| 5338 | return; |
| 5339 | case IBMVNIC_CRQ_XPORT_EVENT: |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 5340 | netif_carrier_off(netdev); |
Thomas Falcon | 5153698 | 2018-05-23 13:37:56 -0500 | [diff] [blame] | 5341 | adapter->crq.active = false; |
Thomas Falcon | 2147e3d | 2019-11-25 17:12:54 -0600 | [diff] [blame] | 5342 | /* terminate any thread waiting for a response |
| 5343 | * from the device |
| 5344 | */ |
| 5345 | if (!completion_done(&adapter->fw_done)) { |
| 5346 | adapter->fw_done_rc = -EIO; |
| 5347 | complete(&adapter->fw_done); |
| 5348 | } |
| 5349 | if (!completion_done(&adapter->stats_done)) |
| 5350 | complete(&adapter->stats_done); |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 5351 | if (test_bit(0, &adapter->resetting)) |
Thomas Falcon | 2770a79 | 2018-05-23 13:38:02 -0500 | [diff] [blame] | 5352 | adapter->force_reset_recovery = true; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5353 | if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 5354 | dev_info(dev, "Migrated, re-enabling adapter\n"); |
| 5355 | ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); |
Thomas Falcon | dfad09a | 2016-08-18 11:37:51 -0500 | [diff] [blame] | 5356 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { |
| 5357 | dev_info(dev, "Backing device failover detected\n"); |
Thomas Falcon | 5a18e1e | 2018-04-06 18:37:05 -0500 | [diff] [blame] | 5358 | adapter->failover_pending = true; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5359 | } else { |
| 5360 | /* The adapter lost the connection */ |
| 5361 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", |
| 5362 | gen_crq->cmd); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 5363 | ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5364 | } |
| 5365 | return; |
| 5366 | case IBMVNIC_CRQ_CMD_RSP: |
| 5367 | break; |
| 5368 | default: |
| 5369 | dev_err(dev, "Got an invalid msg type 0x%02x\n", |
| 5370 | gen_crq->first); |
| 5371 | return; |
| 5372 | } |
| 5373 | |
| 5374 | switch (gen_crq->cmd) { |
| 5375 | case VERSION_EXCHANGE_RSP: |
| 5376 | rc = crq->version_exchange_rsp.rc.code; |
| 5377 | if (rc) { |
| 5378 | dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); |
| 5379 | break; |
| 5380 | } |
Thomas Falcon | 7846889 | 2020-05-28 11:19:17 -0500 | [diff] [blame] | 5381 | ibmvnic_version = |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5382 | be16_to_cpu(crq->version_exchange_rsp.version); |
Thomas Falcon | 7846889 | 2020-05-28 11:19:17 -0500 | [diff] [blame] | 5383 | dev_info(dev, "Partner protocol version is %d\n", |
| 5384 | ibmvnic_version); |
Lijun Pan | 491099a | 2020-09-27 20:13:26 -0500 | [diff] [blame] | 5385 | send_query_cap(adapter); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5386 | break; |
| 5387 | case QUERY_CAPABILITY_RSP: |
| 5388 | handle_query_cap_rsp(crq, adapter); |
| 5389 | break; |
| 5390 | case QUERY_MAP_RSP: |
| 5391 | handle_query_map_rsp(crq, adapter); |
| 5392 | break; |
| 5393 | case REQUEST_MAP_RSP: |
Thomas Falcon | f3be0cb | 2017-06-21 14:53:01 -0500 | [diff] [blame] | 5394 | adapter->fw_done_rc = crq->request_map_rsp.rc.code; |
| 5395 | complete(&adapter->fw_done); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5396 | break; |
| 5397 | case REQUEST_UNMAP_RSP: |
| 5398 | handle_request_unmap_rsp(crq, adapter); |
| 5399 | break; |
| 5400 | case REQUEST_CAPABILITY_RSP: |
| 5401 | handle_request_cap_rsp(crq, adapter); |
| 5402 | break; |
| 5403 | case LOGIN_RSP: |
| 5404 | netdev_dbg(netdev, "Got Login Response\n"); |
| 5405 | handle_login_rsp(crq, adapter); |
| 5406 | break; |
| 5407 | case LOGICAL_LINK_STATE_RSP: |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 5408 | netdev_dbg(netdev, |
| 5409 | "Got Logical Link State Response, state: %d rc: %d\n", |
| 5410 | crq->logical_link_state_rsp.link_state, |
| 5411 | crq->logical_link_state_rsp.rc.code); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5412 | adapter->logical_link_state = |
| 5413 | crq->logical_link_state_rsp.link_state; |
Nathan Fontenot | 53da09e | 2017-04-21 15:39:04 -0400 | [diff] [blame] | 5414 | adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; |
| 5415 | complete(&adapter->init_done); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5416 | break; |
| 5417 | case LINK_STATE_INDICATION: |
| 5418 | netdev_dbg(netdev, "Got Logical Link State Indication\n"); |
| 5419 | adapter->phys_link_state = |
| 5420 | crq->link_state_indication.phys_link_state; |
| 5421 | adapter->logical_link_state = |
| 5422 | crq->link_state_indication.logical_link_state; |
Thomas Falcon | 0655f99 | 2019-05-09 23:13:44 -0500 | [diff] [blame] | 5423 | if (adapter->phys_link_state && adapter->logical_link_state) |
| 5424 | netif_carrier_on(netdev); |
| 5425 | else |
| 5426 | netif_carrier_off(netdev); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5427 | break; |
| 5428 | case CHANGE_MAC_ADDR_RSP: |
| 5429 | netdev_dbg(netdev, "Got MAC address change Response\n"); |
Thomas Falcon | f813614 | 2018-01-29 13:45:05 -0600 | [diff] [blame] | 5430 | adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5431 | break; |
| 5432 | case ERROR_INDICATION: |
| 5433 | netdev_dbg(netdev, "Got Error Indication\n"); |
| 5434 | handle_error_indication(crq, adapter); |
| 5435 | break; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5436 | case REQUEST_STATISTICS_RSP: |
| 5437 | netdev_dbg(netdev, "Got Statistics Response\n"); |
| 5438 | complete(&adapter->stats_done); |
| 5439 | break; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5440 | case QUERY_IP_OFFLOAD_RSP: |
| 5441 | netdev_dbg(netdev, "Got Query IP offload Response\n"); |
| 5442 | handle_query_ip_offload_rsp(adapter); |
| 5443 | break; |
| 5444 | case MULTICAST_CTRL_RSP: |
| 5445 | netdev_dbg(netdev, "Got multicast control Response\n"); |
| 5446 | break; |
| 5447 | case CONTROL_IP_OFFLOAD_RSP: |
| 5448 | netdev_dbg(netdev, "Got Control IP offload Response\n"); |
| 5449 | dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, |
| 5450 | sizeof(adapter->ip_offload_ctrl), |
| 5451 | DMA_TO_DEVICE); |
John Allen | bd0b672 | 2017-03-17 17:13:40 -0500 | [diff] [blame] | 5452 | complete(&adapter->init_done); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5453 | break; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5454 | case COLLECT_FW_TRACE_RSP: |
| 5455 | netdev_dbg(netdev, "Got Collect firmware trace Response\n"); |
| 5456 | complete(&adapter->fw_done); |
| 5457 | break; |
Desnes Augusto Nunes do Rosario | 4e6759b | 2017-11-13 15:59:19 -0200 | [diff] [blame] | 5458 | case GET_VPD_SIZE_RSP: |
| 5459 | handle_vpd_size_rsp(crq, adapter); |
| 5460 | break; |
| 5461 | case GET_VPD_RSP: |
| 5462 | handle_vpd_rsp(crq, adapter); |
| 5463 | break; |
Murilo Fossa Vicentini | f8d6ae0 | 2019-03-19 10:28:51 -0300 | [diff] [blame] | 5464 | case QUERY_PHYS_PARMS_RSP: |
| 5465 | adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); |
| 5466 | complete(&adapter->fw_done); |
| 5467 | break; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5468 | default: |
| 5469 | netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", |
| 5470 | gen_crq->cmd); |
| 5471 | } |
| 5472 | } |
| 5473 | |
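| | /* CRQ interrupt handler: defer all message processing to the tasklet. */ |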
| 5474 | static irqreturn_t ibmvnic_interrupt(int irq, void *instance) |
| 5475 | { |
| 5476 | struct ibmvnic_adapter *adapter = instance; |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5477 | |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5478 | tasklet_schedule(&adapter->tasklet); |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5479 | return IRQ_HANDLED; |
| 5480 | } |
| 5481 | |
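| | /* Drain the CRQ under the queue lock, handling each valid message in turn. */ |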
Allen Pais | aa7c3fe | 2020-09-14 12:59:29 +0530 | [diff] [blame] | 5482 | static void ibmvnic_tasklet(struct tasklet_struct *t) |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5483 | { |
Allen Pais | aa7c3fe | 2020-09-14 12:59:29 +0530 | [diff] [blame] | 5484 | struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5485 | struct ibmvnic_crq_queue *queue = &adapter->crq; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5486 | union ibmvnic_crq *crq; |
| 5487 | unsigned long flags; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5488 | |
| 5489 | spin_lock_irqsave(&queue->lock, flags); |
Sukadev Bhattiprolu | 3a5d9db | 2022-01-21 18:59:21 -0800 | [diff] [blame] | 5490 | |
| 5491 | /* Pull all the valid messages off the CRQ */ |
| 5492 | while ((crq = ibmvnic_next_crq(adapter)) != NULL) { |
| 5493 | /* This barrier makes sure ibmvnic_next_crq()'s |
| 5494 | * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded |
| 5495 | * before ibmvnic_handle_crq()'s |
| 5496 | * switch(gen_crq->first) and switch(gen_crq->cmd). |
| 5497 | */ |
| 5498 | dma_rmb(); |
| 5499 | ibmvnic_handle_crq(crq, adapter); |
| 5500 | crq->generic.first = 0; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5501 | } |
Sukadev Bhattiprolu | 3a5d9db | 2022-01-21 18:59:21 -0800 | [diff] [blame] | 5502 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5503 | spin_unlock_irqrestore(&queue->lock, flags); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5504 | } |
| 5505 | |
| 5506 | static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) |
| 5507 | { |
| 5508 | struct vio_dev *vdev = adapter->vdev; |
| 5509 | int rc; |
| 5510 | |
| 5511 | do { |
| 5512 | rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); |
| 5513 | } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
| 5514 | |
| 5515 | if (rc) |
| 5516 | dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); |
| 5517 | |
| 5518 | return rc; |
| 5519 | } |
| 5520 | |
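| | /* Close the CRQ, clear its message page and re-register it with the |
| | * hypervisor. |
| | */ |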
| 5521 | static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) |
| 5522 | { |
| 5523 | struct ibmvnic_crq_queue *crq = &adapter->crq; |
| 5524 | struct device *dev = &adapter->vdev->dev; |
| 5525 | struct vio_dev *vdev = adapter->vdev; |
| 5526 | int rc; |
| 5527 | |
| 5528 | /* Close the CRQ */ |
| 5529 | do { |
| 5530 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); |
| 5531 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
| 5532 | |
| 5533 | /* Clean out the queue */ |
Lijun Pan | 0e435be | 2020-11-23 13:35:46 -0600 | [diff] [blame] | 5534 | if (!crq->msgs) |
| 5535 | return -EINVAL; |
| 5536 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5537 | memset(crq->msgs, 0, PAGE_SIZE); |
| 5538 | crq->cur = 0; |
Thomas Falcon | 5153698 | 2018-05-23 13:37:56 -0500 | [diff] [blame] | 5539 | crq->active = false; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5540 | |
| 5541 | /* And re-open it again */ |
| 5542 | rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, |
| 5543 | crq->msg_token, PAGE_SIZE); |
| 5544 | |
| 5545 | if (rc == H_CLOSED) |
| 5546 | /* Adapter is good, but other end is not ready */ |
| 5547 | dev_warn(dev, "Partner adapter not ready\n"); |
| 5548 | else if (rc != 0) |
| 5549 | dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); |
| 5550 | |
| 5551 | return rc; |
| 5552 | } |
| 5553 | |
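| | /* Tear down the CRQ: free its interrupt, kill the tasklet, close the |
| | * queue with the hypervisor and release the DMA-mapped message page. |
| | */ |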
Nathan Fontenot | f992887 | 2017-03-30 02:48:54 -0400 | [diff] [blame] | 5554 | static void release_crq_queue(struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5555 | { |
| 5556 | struct ibmvnic_crq_queue *crq = &adapter->crq; |
| 5557 | struct vio_dev *vdev = adapter->vdev; |
| 5558 | long rc; |
| 5559 | |
Nathan Fontenot | f992887 | 2017-03-30 02:48:54 -0400 | [diff] [blame] | 5560 | if (!crq->msgs) |
| 5561 | return; |
| 5562 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5563 | netdev_dbg(adapter->netdev, "Releasing CRQ\n"); |
| 5564 | free_irq(vdev->irq, adapter); |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5565 | tasklet_kill(&adapter->tasklet); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5566 | do { |
| 5567 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); |
| 5568 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
| 5569 | |
| 5570 | dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, |
| 5571 | DMA_BIDIRECTIONAL); |
| 5572 | free_page((unsigned long)crq->msgs); |
Nathan Fontenot | f992887 | 2017-03-30 02:48:54 -0400 | [diff] [blame] | 5573 | crq->msgs = NULL; |
Thomas Falcon | 5153698 | 2018-05-23 13:37:56 -0500 | [diff] [blame] | 5574 | crq->active = false; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5575 | } |
| 5576 | |
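| | /* Allocate and DMA-map the CRQ message page, register it with the |
| | * hypervisor, hook up the CRQ interrupt and enable it, then kick the |
| | * tasklet to handle any messages that arrived before interrupts were on. |
| | */ |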
Nathan Fontenot | f992887 | 2017-03-30 02:48:54 -0400 | [diff] [blame] | 5577 | static int init_crq_queue(struct ibmvnic_adapter *adapter) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5578 | { |
| 5579 | struct ibmvnic_crq_queue *crq = &adapter->crq; |
| 5580 | struct device *dev = &adapter->vdev->dev; |
| 5581 | struct vio_dev *vdev = adapter->vdev; |
| 5582 | int rc, retrc = -ENOMEM; |
| 5583 | |
Nathan Fontenot | f992887 | 2017-03-30 02:48:54 -0400 | [diff] [blame] | 5584 | if (crq->msgs) |
| 5585 | return 0; |
| 5586 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5587 | crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); |
| 5588 | /* Should we allocate more than one page? */ |
| 5589 | |
| 5590 | if (!crq->msgs) |
| 5591 | return -ENOMEM; |
| 5592 | |
| 5593 | crq->size = PAGE_SIZE / sizeof(*crq->msgs); |
| 5594 | crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, |
| 5595 | DMA_BIDIRECTIONAL); |
| 5596 | if (dma_mapping_error(dev, crq->msg_token)) |
| 5597 | goto map_failed; |
| 5598 | |
| 5599 | rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, |
| 5600 | crq->msg_token, PAGE_SIZE); |
| 5601 | |
| 5602 | if (rc == H_RESOURCE) |
| 5603 | /* maybe kexecing and resource is busy. try a reset */ |
| 5604 | rc = ibmvnic_reset_crq(adapter); |
| 5605 | retrc = rc; |
| 5606 | |
| 5607 | if (rc == H_CLOSED) { |
| 5608 | dev_warn(dev, "Partner adapter not ready\n"); |
| 5609 | } else if (rc) { |
| 5610 | dev_warn(dev, "Error %d opening adapter\n", rc); |
| 5611 | goto reg_crq_failed; |
| 5612 | } |
| 5613 | |
| 5614 | retrc = 0; |
| 5615 | |
Allen Pais | aa7c3fe | 2020-09-14 12:59:29 +0530 | [diff] [blame] | 5616 | tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5617 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5618 | netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); |
Murilo Fossa Vicentini | e56e251 | 2019-04-25 11:02:33 -0300 | [diff] [blame] | 5619 | snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", |
| 5620 | adapter->vdev->unit_address); |
| 5621 | rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5622 | if (rc) { |
| 5623 | dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", |
| 5624 | vdev->irq, rc); |
| 5625 | goto req_irq_failed; |
| 5626 | } |
| 5627 | |
| 5628 | rc = vio_enable_interrupts(vdev); |
| 5629 | if (rc) { |
| 5630 | dev_err(dev, "Error %d enabling interrupts\n", rc); |
| 5631 | goto req_irq_failed; |
| 5632 | } |
| 5633 | |
| 5634 | crq->cur = 0; |
| 5635 | spin_lock_init(&crq->lock); |
| 5636 | |
Sukadev Bhattiprolu | 6e20d00 | 2021-10-29 15:03:15 -0700 | [diff] [blame] | 5637 | /* process any CRQs that were queued before we enabled interrupts */ |
| 5638 | tasklet_schedule(&adapter->tasklet); |
| 5639 | |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5640 | return retrc; |
| 5641 | |
| 5642 | req_irq_failed: |
Thomas Falcon | 6c267b3 | 2017-02-15 12:17:58 -0600 | [diff] [blame] | 5643 | tasklet_kill(&adapter->tasklet); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5644 | do { |
| 5645 | rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); |
| 5646 | } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); |
| 5647 | reg_crq_failed: |
| 5648 | dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); |
| 5649 | map_failed: |
| 5650 | free_page((unsigned long)crq->msgs); |
Nathan Fontenot | f992887 | 2017-03-30 02:48:54 -0400 | [diff] [blame] | 5651 | crq->msgs = NULL; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5652 | return retrc; |
| 5653 | } |
| 5654 | |
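| | /* Drive the CRQ initialization handshake and wait for it to complete, |
| | * then set up (or reset) the sub-CRQs and their interrupts. |
| | */ |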
Lijun Pan | 635e442 | 2020-08-19 17:52:26 -0500 | [diff] [blame] | 5655 | static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) |
John Allen | f6ef640 | 2017-03-17 17:13:42 -0500 | [diff] [blame] | 5656 | { |
| 5657 | struct device *dev = &adapter->vdev->dev; |
Dany Madden | 98c41f0 | 2020-11-25 18:04:32 -0600 | [diff] [blame] | 5658 | unsigned long timeout = msecs_to_jiffies(20000); |
Michal Suchanek | 6881b07 | 2021-03-02 20:47:47 +0100 | [diff] [blame] | 5659 | u64 old_num_rx_queues = adapter->req_rx_queues; |
| 5660 | u64 old_num_tx_queues = adapter->req_tx_queues; |
John Allen | f6ef640 | 2017-03-17 17:13:42 -0500 | [diff] [blame] | 5661 | int rc; |
| 5662 | |
John Allen | 017892c1 | 2017-05-26 10:30:19 -0400 | [diff] [blame] | 5663 | adapter->from_passive_init = false; |
| 5664 | |
Michal Suchanek | 6881b07 | 2021-03-02 20:47:47 +0100 | [diff] [blame] | 5665 | if (reset) |
Lijun Pan | 635e442 | 2020-08-19 17:52:26 -0500 | [diff] [blame] | 5666 | reinit_completion(&adapter->init_done); |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 5667 | |
Nathan Fontenot | 6a2fb0e | 2017-06-15 14:48:09 -0400 | [diff] [blame] | 5668 | adapter->init_done_rc = 0; |
Lijun Pan | fa68bfa | 2020-08-19 17:52:24 -0500 | [diff] [blame] | 5669 | rc = ibmvnic_send_crq_init(adapter); |
| 5670 | if (rc) { |
| 5671 | dev_err(dev, "Send crq init failed with error %d\n", rc); |
| 5672 | return rc; |
| 5673 | } |
| 5674 | |
John Allen | f6ef640 | 2017-03-17 17:13:42 -0500 | [diff] [blame] | 5675 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
| 5676 | dev_err(dev, "Initialization sequence timed out\n"); |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 5677 | return -ETIMEDOUT; |
John Allen | 017892c1 | 2017-05-26 10:30:19 -0400 | [diff] [blame] | 5678 | } |
| 5679 | |
Nathan Fontenot | 6a2fb0e | 2017-06-15 14:48:09 -0400 | [diff] [blame] | 5680 | if (adapter->init_done_rc) { |
| 5681 | release_crq_queue(adapter); |
| 5682 | return adapter->init_done_rc; |
| 5683 | } |
| 5684 | |
Lijun Pan | 785a2b1 | 2020-09-17 21:12:46 -0500 | [diff] [blame] | 5685 | if (adapter->from_passive_init) { |
| 5686 | adapter->state = VNIC_OPEN; |
| 5687 | adapter->from_passive_init = false; |
Dany Madden | b6ee566 | 2021-12-14 00:17:47 -0500 | [diff] [blame] | 5688 | return -EINVAL; |
Lijun Pan | 785a2b1 | 2020-09-17 21:12:46 -0500 | [diff] [blame] | 5689 | } |
| 5690 | |
Lijun Pan | 635e442 | 2020-08-19 17:52:26 -0500 | [diff] [blame] | 5691 | if (reset && |
| 5692 | test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 5693 | adapter->reset_reason != VNIC_RESET_MOBILITY) { |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 5694 | if (adapter->req_rx_queues != old_num_rx_queues || |
| 5695 | adapter->req_tx_queues != old_num_tx_queues) { |
| 5696 | release_sub_crqs(adapter, 0); |
| 5697 | rc = init_sub_crqs(adapter); |
| 5698 | } else { |
| 5699 | rc = reset_sub_crq_queues(adapter); |
| 5700 | } |
| 5701 | } else { |
Nathan Fontenot | 57a4943 | 2017-05-26 10:31:12 -0400 | [diff] [blame] | 5702 | rc = init_sub_crqs(adapter); |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 5703 | } |
| 5704 | |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 5705 | if (rc) { |
| 5706 | dev_err(dev, "Initialization of sub crqs failed\n"); |
| 5707 | release_crq_queue(adapter); |
Thomas Falcon | 5df969c | 2017-06-28 19:55:54 -0500 | [diff] [blame] | 5708 | return rc; |
| 5709 | } |
| 5710 | |
| 5711 | rc = init_sub_crq_irqs(adapter); |
| 5712 | if (rc) { |
| 5713 | dev_err(dev, "Failed to initialize sub crq irqs\n"); |
| 5714 | release_crq_queue(adapter); |
Nathan Fontenot | 1bb3c73 | 2017-04-25 15:01:10 -0400 | [diff] [blame] | 5715 | } |
| 5716 | |
| 5717 | return rc; |
John Allen | f6ef640 | 2017-03-17 17:13:42 -0500 | [diff] [blame] | 5718 | } |
| 5719 | |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5720 | static struct device_attribute dev_attr_failover; |
| 5721 | |
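| | /* Probe: allocate the netdev, initialize adapter state, locks and |
| | * completions, bring up the CRQ and attempt the initial handshake, |
| | * then register the failover sysfs attribute and the netdev. |
| | */ |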
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5722 | static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) |
| 5723 | { |
| 5724 | struct ibmvnic_adapter *adapter; |
| 5725 | struct net_device *netdev; |
| 5726 | unsigned char *mac_addr_p; |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 5727 | bool init_success; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5728 | int rc; |
| 5729 | |
| 5730 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", |
| 5731 | dev->unit_address); |
| 5732 | |
| 5733 | mac_addr_p = (unsigned char *)vio_get_attribute(dev, |
| 5734 | VETH_MAC_ADDR, NULL); |
| 5735 | if (!mac_addr_p) { |
| 5736 | dev_err(&dev->dev, |
| 5737 | "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", |
| 5738 | __FILE__, __LINE__); |
| 5739 | return 0; |
| 5740 | } |
| 5741 | |
| 5742 | netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), |
Thomas Falcon | d45cc3a | 2017-12-18 12:52:11 -0600 | [diff] [blame] | 5743 | IBMVNIC_MAX_QUEUES); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5744 | if (!netdev) |
| 5745 | return -ENOMEM; |
| 5746 | |
| 5747 | adapter = netdev_priv(netdev); |
Nathan Fontenot | 90c8014 | 2017-05-03 14:04:32 -0400 | [diff] [blame] | 5748 | adapter->state = VNIC_PROBING; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5749 | dev_set_drvdata(&dev->dev, netdev); |
| 5750 | adapter->vdev = dev; |
| 5751 | adapter->netdev = netdev; |
Sukadev Bhattiprolu | 76cdc5c | 2020-11-25 18:04:29 -0600 | [diff] [blame] | 5752 | adapter->login_pending = false; |
Sukadev Bhattiprolu | 129854f0 | 2021-09-14 20:52:56 -0700 | [diff] [blame] | 5753 | memset(&adapter->map_ids, 0, sizeof(adapter->map_ids)); |
| 5754 | /* map_ids start at 1, so ensure map_id 0 is always "in-use" */ |
| 5755 | bitmap_set(adapter->map_ids, 0, 1); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5756 | |
| 5757 | ether_addr_copy(adapter->mac_addr, mac_addr_p); |
Jakub Kicinski | f3956eb | 2021-10-01 14:32:23 -0700 | [diff] [blame] | 5758 | eth_hw_addr_set(netdev, adapter->mac_addr); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5759 | netdev->irq = dev->irq; |
| 5760 | netdev->netdev_ops = &ibmvnic_netdev_ops; |
| 5761 | netdev->ethtool_ops = &ibmvnic_ethtool_ops; |
| 5762 | SET_NETDEV_DEV(netdev, &dev->dev); |
| 5763 | |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 5764 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 5765 | INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset, |
| 5766 | __ibmvnic_delayed_reset); |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 5767 | INIT_LIST_HEAD(&adapter->rwi_list); |
Thomas Falcon | 6c5c748 | 2018-12-10 15:22:22 -0600 | [diff] [blame] | 5768 | spin_lock_init(&adapter->rwi_lock); |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 5769 | spin_lock_init(&adapter->state_lock); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5770 | mutex_init(&adapter->fw_lock); |
Thomas Falcon | bbd669a | 2019-04-04 18:58:26 -0500 | [diff] [blame] | 5771 | init_completion(&adapter->init_done); |
Thomas Falcon | 070eca9 | 2019-11-25 17:12:53 -0600 | [diff] [blame] | 5772 | init_completion(&adapter->fw_done); |
| 5773 | init_completion(&adapter->reset_done); |
| 5774 | init_completion(&adapter->stats_done); |
Juliet Kim | 7ed5b31 | 2019-09-20 16:11:23 -0400 | [diff] [blame] | 5775 | clear_bit(0, &adapter->resetting); |
Sukadev Bhattiprolu | 489de95 | 2021-09-14 20:52:58 -0700 | [diff] [blame] | 5776 | adapter->prev_rx_buf_sz = 0; |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 5777 | adapter->prev_mtu = 0; |
Nathan Fontenot | ed651a1 | 2017-05-03 14:04:38 -0400 | [diff] [blame] | 5778 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 5779 | init_success = false; |
Nathan Fontenot | 6a2fb0e | 2017-06-15 14:48:09 -0400 | [diff] [blame] | 5780 | do { |
Nathan Fontenot | 30f7962 | 2018-04-06 18:37:06 -0500 | [diff] [blame] | 5781 | rc = init_crq_queue(adapter); |
| 5782 | if (rc) { |
| 5783 | dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", |
| 5784 | rc); |
| 5785 | goto ibmvnic_init_fail; |
| 5786 | } |
| 5787 | |
Lijun Pan | 635e442 | 2020-08-19 17:52:26 -0500 | [diff] [blame] | 5788 | rc = ibmvnic_reset_init(adapter, false); |
Sukadev Bhattiprolu | 6b278c0 | 2021-10-29 15:03:16 -0700 | [diff] [blame] | 5789 | } while (rc == -EAGAIN); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5790 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 5791 | /* Ignore any error from ibmvnic_reset_init() here, assuming the |
| 5792 | * partner is simply not ready and the CRQ is not yet active. When |
| 5793 | * the partner becomes ready, we will do the passive init reset. |
| 5794 | */ |
| 5795 | |
| 5796 | if (!rc) |
| 5797 | init_success = true; |
| 5798 | |
Thomas Falcon | 0718421 | 2018-05-16 15:49:05 -0500 | [diff] [blame] | 5799 | rc = init_stats_buffers(adapter); |
| 5800 | if (rc) |
| 5801 | goto ibmvnic_init_fail; |
| 5802 | |
| 5803 | rc = init_stats_token(adapter); |
| 5804 | if (rc) |
| 5805 | goto ibmvnic_stats_fail; |
| 5806 | |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5807 | rc = device_create_file(&dev->dev, &dev_attr_failover); |
Nathan Fontenot | 7c1885a | 2017-08-08 14:28:45 -0500 | [diff] [blame] | 5808 | if (rc) |
Thomas Falcon | 0718421 | 2018-05-16 15:49:05 -0500 | [diff] [blame] | 5809 | goto ibmvnic_dev_file_err; |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5810 | |
Mick Tarsel | e876a8a | 2017-09-28 13:53:18 -0700 | [diff] [blame] | 5811 | netif_carrier_off(netdev); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5812 | rc = register_netdev(netdev); |
| 5813 | if (rc) { |
| 5814 | dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); |
Nathan Fontenot | 7c1885a | 2017-08-08 14:28:45 -0500 | [diff] [blame] | 5815 | goto ibmvnic_register_fail; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5816 | } |
| 5817 | dev_info(&dev->dev, "ibmvnic registered\n"); |
| 5818 | |
Cristobal Forno | 53f8b1b | 2021-06-10 11:08:35 -0600 | [diff] [blame] | 5819 | if (init_success) { |
| 5820 | adapter->state = VNIC_PROBED; |
| 5821 | netdev->mtu = adapter->req_mtu - ETH_HLEN; |
| 5822 | netdev->min_mtu = adapter->min_mtu - ETH_HLEN; |
| 5823 | netdev->max_mtu = adapter->max_mtu - ETH_HLEN; |
| 5824 | } else { |
| 5825 | adapter->state = VNIC_DOWN; |
| 5826 | } |
John Allen | c26eba0 | 2017-10-26 16:23:25 -0500 | [diff] [blame] | 5827 | |
| 5828 | adapter->wait_for_reset = false; |
Dany Madden | a86d5c6 | 2020-11-25 18:04:31 -0600 | [diff] [blame] | 5829 | adapter->last_reset_time = jiffies; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5830 | return 0; |
Nathan Fontenot | 7c1885a | 2017-08-08 14:28:45 -0500 | [diff] [blame] | 5831 | |
| 5832 | ibmvnic_register_fail: |
| 5833 | device_remove_file(&dev->dev, &dev_attr_failover); |
| 5834 | |
Thomas Falcon | 0718421 | 2018-05-16 15:49:05 -0500 | [diff] [blame] | 5835 | ibmvnic_dev_file_err: |
| 5836 | release_stats_token(adapter); |
| 5837 | |
| 5838 | ibmvnic_stats_fail: |
| 5839 | release_stats_buffers(adapter); |
| 5840 | |
Nathan Fontenot | 7c1885a | 2017-08-08 14:28:45 -0500 | [diff] [blame] | 5841 | ibmvnic_init_fail: |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 5842 | release_sub_crqs(adapter, 1); |
Nathan Fontenot | 7c1885a | 2017-08-08 14:28:45 -0500 | [diff] [blame] | 5843 | release_crq_queue(adapter); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5844 | mutex_destroy(&adapter->fw_lock); |
Nathan Fontenot | 7c1885a | 2017-08-08 14:28:45 -0500 | [diff] [blame] | 5845 | free_netdev(netdev); |
| 5846 | |
| 5847 | return rc; |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5848 | } |
| 5849 | |
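| | /* Remove: mark the adapter REMOVING, flush any pending resets, then |
| | * unregister the netdev and release all queues, buffers and tokens. |
| | */ |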
Uwe Kleine-König | 386a966 | 2021-02-25 23:18:34 +0100 | [diff] [blame] | 5850 | static void ibmvnic_remove(struct vio_dev *dev) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5851 | { |
| 5852 | struct net_device *netdev = dev_get_drvdata(&dev->dev); |
Nathan Fontenot | 3748905 | 2017-04-19 13:45:04 -0400 | [diff] [blame] | 5853 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 5854 | unsigned long flags; |
| 5855 | |
| 5856 | spin_lock_irqsave(&adapter->state_lock, flags); |
Sukadev Bhattiprolu | 4a41c42 | 2021-02-12 20:42:50 -0800 | [diff] [blame] | 5857 | |
| 5858 | /* If ibmvnic_reset() is scheduling a reset, wait for it to |
| 5859 | * finish. Then, set the state to REMOVING to prevent it from |
| 5860 | * scheduling any more work and to have reset functions ignore |
| 5861 | * any resets that have already been scheduled. Drop the lock |
| 5862 | * after setting state, so __ibmvnic_reset(), which is called |
| 5863 | * from the flush_work() below, can make progress. |
| 5864 | */ |
Junlin Yang | 69cdb79 | 2021-03-05 16:48:39 +0800 | [diff] [blame] | 5865 | spin_lock(&adapter->rwi_lock); |
Nathan Fontenot | 90c8014 | 2017-05-03 14:04:32 -0400 | [diff] [blame] | 5866 | adapter->state = VNIC_REMOVING; |
Junlin Yang | 69cdb79 | 2021-03-05 16:48:39 +0800 | [diff] [blame] | 5867 | spin_unlock(&adapter->rwi_lock); |
Sukadev Bhattiprolu | 4a41c42 | 2021-02-12 20:42:50 -0800 | [diff] [blame] | 5868 | |
Juliet Kim | 7d7195a | 2020-03-10 09:23:58 -0500 | [diff] [blame] | 5869 | spin_unlock_irqrestore(&adapter->state_lock, flags); |
| 5870 | |
Thomas Falcon | 6954a9e | 2020-06-12 13:34:41 -0500 | [diff] [blame] | 5871 | flush_work(&adapter->ibmvnic_reset); |
| 5872 | flush_delayed_work(&adapter->ibmvnic_delayed_reset); |
| 5873 | |
Juliet Kim | a5681e2 | 2018-11-19 15:59:22 -0600 | [diff] [blame] | 5874 | rtnl_lock(); |
| 5875 | unregister_netdevice(netdev); |
Nathan Fontenot | 3748905 | 2017-04-19 13:45:04 -0400 | [diff] [blame] | 5876 | |
| 5877 | release_resources(adapter); |
Sukadev Bhattiprolu | 489de95 | 2021-09-14 20:52:58 -0700 | [diff] [blame] | 5878 | release_rx_pools(adapter); |
Sukadev Bhattiprolu | bbd8093 | 2021-09-14 20:52:59 -0700 | [diff] [blame] | 5879 | release_tx_pools(adapter); |
Nathan Fontenot | d7c0ef3 | 2018-02-19 13:30:31 -0600 | [diff] [blame] | 5880 | release_sub_crqs(adapter, 1); |
Nathan Fontenot | 3748905 | 2017-04-19 13:45:04 -0400 | [diff] [blame] | 5881 | release_crq_queue(adapter); |
| 5882 | |
Thomas Falcon | 53cc772 | 2018-02-26 18:10:56 -0600 | [diff] [blame] | 5883 | release_stats_token(adapter); |
| 5884 | release_stats_buffers(adapter); |
| 5885 | |
Nathan Fontenot | 90c8014 | 2017-05-03 14:04:32 -0400 | [diff] [blame] | 5886 | adapter->state = VNIC_REMOVED; |
| 5887 | |
Juliet Kim | a5681e2 | 2018-11-19 15:59:22 -0600 | [diff] [blame] | 5888 | rtnl_unlock(); |
Thomas Falcon | ff25dcb | 2019-11-25 17:12:56 -0600 | [diff] [blame] | 5889 | mutex_destroy(&adapter->fw_lock); |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5890 | device_remove_file(&dev->dev, &dev_attr_failover); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5891 | free_netdev(netdev); |
| 5892 | dev_set_drvdata(&dev->dev, NULL); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5893 | } |
| 5894 | |
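/* Sysfs 'failover' attribute (write-only). Writing "1" asks the
 * hypervisor, via the H_VIOCTL H_SESSION_ERR_DETECTED call, to fail
 * the connection over to a backup device; a FAILOVER reset is also
 * scheduled through the CRQ as a last resort in case the hcall path
 * cannot complete the failover.
 *
 * Illustrative usage (the exact sysfs path depends on the VIO unit
 * address of the adapter):
 *   echo 1 > /sys/devices/vio/30000003/failover
 */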
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5895 | static ssize_t failover_store(struct device *dev, struct device_attribute *attr, |
| 5896 | const char *buf, size_t count) |
| 5897 | { |
| 5898 | struct net_device *netdev = dev_get_drvdata(dev); |
| 5899 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 5900 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
| 5901 | __be64 session_token; |
| 5902 | long rc; |
| 5903 | |
| 5904 | if (!sysfs_streq(buf, "1")) |
| 5905 | return -EINVAL; |
| 5906 | |
| 5907 | rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, |
| 5908 | H_GET_SESSION_TOKEN, 0, 0, 0); |
| 5909 | if (rc) { |
| 5910 | netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", |
| 5911 | rc); |
Lijun Pan | 334c424 | 2021-04-13 03:31:44 -0500 | [diff] [blame] | 5912 | goto last_resort; |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5913 | } |
| 5914 | |
| 5915 | session_token = (__be64)retbuf[0]; |
| 5916 | netdev_dbg(netdev, "Initiating client failover, session id %llx\n", |
| 5917 | be64_to_cpu(session_token)); |
| 5918 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, |
| 5919 | H_SESSION_ERR_DETECTED, session_token, 0, 0); |
Lijun Pan | 334c424 | 2021-04-13 03:31:44 -0500 | [diff] [blame] | 5920 | if (rc) |
| 5921 | netdev_err(netdev, |
| 5922 | "H_VIOCTL initiated failover failed, rc %ld\n", |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5923 | rc); |
Lijun Pan | 334c424 | 2021-04-13 03:31:44 -0500 | [diff] [blame] | 5924 | |
| 5925 | last_resort: |
| 5926 | netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); |
| 5927 | ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5928 | |
| 5929 | return count; |
| 5930 | } |
Joe Perches | 6cbaefb | 2017-12-19 10:15:09 -0800 | [diff] [blame] | 5931 | static DEVICE_ATTR_WO(failover); |
Thomas Falcon | 40c9db8 | 2017-06-12 12:35:04 -0500 | [diff] [blame] | 5932 | |
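/* Report to the VIO bus how much IOMMU/DMA space this device expects
 * to map: one page for the CRQ, the aligned statistics buffer, four
 * pages for each requested TX and RX sub-CRQ, and the long-term-mapped
 * buffers of each active RX pool. Before the netdev exists (early in
 * probe) a default entitlement is returned instead.
 */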
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5933 | static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) |
| 5934 | { |
| 5935 | struct net_device *netdev = dev_get_drvdata(&vdev->dev); |
| 5936 | struct ibmvnic_adapter *adapter; |
| 5937 | struct iommu_table *tbl; |
| 5938 | unsigned long ret = 0; |
| 5939 | int i; |
| 5940 | |
| 5941 | tbl = get_iommu_table_base(&vdev->dev); |
| 5942 | |
| 5943 | /* netdev inits at probe time along with the structures we need below */ |
| 5944 | if (!netdev) |
| 5945 | return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); |
| 5946 | |
| 5947 | adapter = netdev_priv(netdev); |
| 5948 | |
| 5949 | ret += PAGE_SIZE; /* the crq message queue */ |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5950 | ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); /* the stats buffer */ |
| 5951 | |
| 5952 | for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) |
| 5953 | ret += 4 * PAGE_SIZE; /* the scrq message queue */ |
| 5954 | |
Thomas Falcon | 507ebe6 | 2020-08-21 13:39:01 -0500 | [diff] [blame] | 5955 | for (i = 0; i < adapter->num_active_rx_pools; i++) |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5956 | ret += adapter->rx_pool[i].size * |
| 5957 | IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); |
| 5958 | |
| 5959 | return ret; |
| 5960 | } |
| 5961 | |
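/* Power-management resume hook: if the adapter is open, kick the CRQ
 * tasklet so any messages that arrived while suspended are processed.
 */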
| 5962 | static int ibmvnic_resume(struct device *dev) |
| 5963 | { |
| 5964 | struct net_device *netdev = dev_get_drvdata(dev); |
| 5965 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5966 | |
John Allen | cb89ba2 | 2017-06-19 11:27:53 -0500 | [diff] [blame] | 5967 | if (adapter->state != VNIC_OPEN) |
| 5968 | return 0; |
| 5969 | |
John Allen | a248878 | 2017-07-24 13:26:06 -0500 | [diff] [blame] | 5970 | tasklet_schedule(&adapter->tasklet); |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5971 | |
| 5972 | return 0; |
| 5973 | } |
| 5974 | |
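/* VIO devices with device_type "network" and compatible "IBM,vnic"
 * bind to this driver.
 */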
Arvind Yadav | 8c37bc6 | 2017-08-17 18:52:54 +0530 | [diff] [blame] | 5975 | static const struct vio_device_id ibmvnic_device_table[] = { |
Thomas Falcon | 032c5e8 | 2015-12-21 11:26:06 -0600 | [diff] [blame] | 5976 | {"network", "IBM,vnic"}, |
| 5977 | {"", "" } |
| 5978 | }; |
| 5979 | MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); |
| 5980 | |
| 5981 | static const struct dev_pm_ops ibmvnic_pm_ops = { |
| 5982 | .resume = ibmvnic_resume |
| 5983 | }; |
| 5984 | |
| 5985 | static struct vio_driver ibmvnic_driver = { |
| 5986 | .id_table = ibmvnic_device_table, |
| 5987 | .probe = ibmvnic_probe, |
| 5988 | .remove = ibmvnic_remove, |
| 5989 | .get_desired_dma = ibmvnic_get_desired_dma, |
| 5990 | .name = ibmvnic_driver_name, |
| 5991 | .pm = &ibmvnic_pm_ops, |
| 5992 | }; |
| 5993 | |
| 5994 | /* module functions */ |
| 5995 | static int __init ibmvnic_module_init(void) |
| 5996 | { |
| 5997 | pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, |
| 5998 | IBMVNIC_DRIVER_VERSION); |
| 5999 | |
| 6000 | return vio_register_driver(&ibmvnic_driver); |
| 6001 | } |
| 6002 | |
| 6003 | static void __exit ibmvnic_module_exit(void) |
| 6004 | { |
| 6005 | vio_unregister_driver(&ibmvnic_driver); |
| 6006 | } |
| 6007 | |
| 6008 | module_init(ibmvnic_module_init); |
| 6009 | module_exit(ibmvnic_module_exit); |