// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* */
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/

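/* Rough control-plane bring-up sequence as implemented below (sketch):
 * initialize/reset the CRQ, exchange versions (send_version_xchg()),
 * query capabilities (send_query_cap()), log in to the VNIC server
 * (ibmvnic_login()), then register the long term buffers that back the
 * rx/tx pools with the VIOS (alloc_long_term_buff()) before traffic flows.
 */
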
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

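/* ethtool statistics helpers: IBMVNIC_STAT_OFF() records where a counter
 * lives inside adapter->stats (struct ibmvnic_statistics), and
 * IBMVNIC_GET_STAT() reads a u64 at that offset. Example use (sketch):
 *	data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 */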
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

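/* Tell the VNIC server that our side of the CRQ initialization
 * handshake is complete.
 */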
static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

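/* Register a sub-CRQ page with the hypervisor via the H_REG_SUB_CRQ hcall.
 * On success the hypervisor returns the new sub-CRQ number and its
 * interrupt source, which are passed back through @number and @irq.
 */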
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise, if
 * it is non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or under low-memory situations, can cause resets to fail/time out and
 * the LPAR to lose connectivity. So hold onto the LTB even if we fail to
 * communicate with the VIOS and reuse it on the next open. Free the LTB
 * when the adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 * a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

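/* Refill an rx pool: reuse or allocate skbs, stage their data in the pool's
 * long term buffer, and post rx descriptors to the VIOS in batches through
 * send_subcrq_indirect(). On failure the staged buffers are returned to the
 * free map and, if the queue is closed or a failover is pending, the pools
 * are deactivated and the carrier is turned off.
 */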
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		index = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[index].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[index].data = dst;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

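/* DMA-map the adapter statistics buffer and remember the token that is
 * handed to firmware when statistics are requested.
 */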
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}

/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					  rx_pool->size * rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pools() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

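/* Allocate the per-pool bookkeeping (tx_buff array and free_map) for one
 * tx/tso pool; the backing long term buffer itself is allocated later in
 * init_tx_pools().
 */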
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -ENOMEM;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	if (old_mtu != new_mtu ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;		/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 *       same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 *       buffers (see calls init_one_tx_pool() for these).
	 *       For consistency, we use tx_pool->num_buffers and
	 *       tso_pool->num_buffers below.
	 */
	rc = -1;
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;
		u32 ltb_size;

		tx_pool = &adapter->tx_pool[i];
		ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
			i, tx_pool->long_term_buff.buff,
			tx_pool->num_buffers, tx_pool->buf_size);

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];
		ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
			i, tso_pool->long_term_buff.buff,
			tso_pool->num_buffers, tso_pool->buf_size);

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

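/* Perform the LOGIN exchange with the VNIC server. Retries on timeout or
 * an aborted login; on PARTIALSUCCESS it re-queries capabilities and
 * rebuilds the sub-CRQs before trying again (up to 10 attempts).
 */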
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return rc;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return rc;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);
			return -EIO;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

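/* Ask the VNIC server to change the logical link state and wait for the
 * response; a partial success is retried after a short delay.
 */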
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001213static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1214{
1215 struct net_device *netdev = adapter->netdev;
Dany Madden98c41f02020-11-25 18:04:32 -06001216 unsigned long timeout = msecs_to_jiffies(20000);
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001217 union ibmvnic_crq crq;
1218 bool resend;
1219 int rc;
1220
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001221 netdev_dbg(netdev, "setting link state %d\n", link_state);
1222
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001223 memset(&crq, 0, sizeof(crq));
1224 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1225 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1226 crq.logical_link_state.link_state = link_state;
1227
1228 do {
1229 resend = false;
1230
1231 reinit_completion(&adapter->init_done);
1232 rc = ibmvnic_send_crq(adapter, &crq);
1233 if (rc) {
1234 netdev_err(netdev, "Failed to set link state\n");
1235 return rc;
1236 }
1237
1238 if (!wait_for_completion_timeout(&adapter->init_done,
1239 timeout)) {
1240 netdev_err(netdev, "timeout setting link state\n");
Dany Maddenb6ee5662021-12-14 00:17:47 -05001241 return -ETIMEDOUT;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001242 }
1243
Lijun Pan4c5f6af2020-08-19 17:52:23 -05001244 if (adapter->init_done_rc == PARTIALSUCCESS) {
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001245 /* Partuial success, delay and re-send */
1246 mdelay(1000);
1247 resend = true;
Thomas Falconab5ec332018-05-23 13:37:59 -05001248 } else if (adapter->init_done_rc) {
1249 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1250 adapter->init_done_rc);
1251 return adapter->init_done_rc;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001252 }
1253 } while (resend);
1254
1255 return 0;
1256}
1257
Thomas Falcon7f3c6e62017-04-21 15:38:40 -04001258static int set_real_num_queues(struct net_device *netdev)
1259{
1260 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1261 int rc;
1262
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001263 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1264 adapter->req_tx_queues, adapter->req_rx_queues);
1265
Thomas Falcon7f3c6e62017-04-21 15:38:40 -04001266 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1267 if (rc) {
1268 netdev_err(netdev, "failed to set the number of tx queues\n");
1269 return rc;
1270 }
1271
1272 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1273 if (rc)
1274 netdev_err(netdev, "failed to set the number of rx queues\n");
1275
1276 return rc;
1277}
1278
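/* Retrieve Vital Product Data from the VNIC server in two CRQ round
 * trips: GET_VPD_SIZE to learn the buffer length, then GET_VPD with a
 * DMA-mapped buffer of that size. Each request waits on fw_done under
 * fw_lock before continuing.
 */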
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001279static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1280{
1281 struct device *dev = &adapter->vdev->dev;
1282 union ibmvnic_crq crq;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001283 int len = 0;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001284 int rc;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001285
1286 if (adapter->vpd->buff)
1287 len = adapter->vpd->len;
1288
Thomas Falconff25dcb2019-11-25 17:12:56 -06001289 mutex_lock(&adapter->fw_lock);
1290 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06001291 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001292
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001293 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1294 crq.get_vpd_size.cmd = GET_VPD_SIZE;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001295 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001296 if (rc) {
1297 mutex_unlock(&adapter->fw_lock);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001298 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001299 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06001300
1301 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1302 if (rc) {
1303 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001304 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06001305 return rc;
1306 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06001307 mutex_unlock(&adapter->fw_lock);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001308
1309 if (!adapter->vpd->len)
1310 return -ENODATA;
1311
1312 if (!adapter->vpd->buff)
1313 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1314 else if (adapter->vpd->len != len)
1315 adapter->vpd->buff =
1316 krealloc(adapter->vpd->buff,
1317 adapter->vpd->len, GFP_KERNEL);
1318
1319 if (!adapter->vpd->buff) {
1320		dev_err(dev, "Could not allocate VPD buffer\n");
1321 return -ENOMEM;
1322 }
1323
1324 adapter->vpd->dma_addr =
1325 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1326 DMA_FROM_DEVICE);
Desnes Augusto Nunes do Rosariof7431062017-11-17 09:09:04 -02001327 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001328 dev_err(dev, "Could not map VPD buffer\n");
1329 kfree(adapter->vpd->buff);
Thomas Falconb0992ec2018-02-06 17:25:23 -06001330 adapter->vpd->buff = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001331 return -ENOMEM;
1332 }
1333
Thomas Falconff25dcb2019-11-25 17:12:56 -06001334 mutex_lock(&adapter->fw_lock);
1335 adapter->fw_done_rc = 0;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001336 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001337
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001338 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1339 crq.get_vpd.cmd = GET_VPD;
1340 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1341 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001342 rc = ibmvnic_send_crq(adapter, &crq);
1343 if (rc) {
1344 kfree(adapter->vpd->buff);
1345 adapter->vpd->buff = NULL;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001346 mutex_unlock(&adapter->fw_lock);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001347 return rc;
1348 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06001349
1350 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1351 if (rc) {
1352 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1353 kfree(adapter->vpd->buff);
1354 adapter->vpd->buff = NULL;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001355 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06001356 return rc;
1357 }
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001358
Thomas Falconff25dcb2019-11-25 17:12:56 -06001359 mutex_unlock(&adapter->fw_lock);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001360 return 0;
1361}
1362
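/* Allocate everything needed for an open: set the real tx/rx queue
 * counts, fetch VPD, set up the napi instances, request the memory map
 * from the server, and allocate the rx and tx buffer pools.
 */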
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001363static int init_resources(struct ibmvnic_adapter *adapter)
John Allena57a5d22017-03-17 17:13:41 -05001364{
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001365 struct net_device *netdev = adapter->netdev;
Nathan Fontenot86f669b2018-02-19 13:30:39 -06001366 int rc;
John Allena57a5d22017-03-17 17:13:41 -05001367
Thomas Falcon7f3c6e62017-04-21 15:38:40 -04001368 rc = set_real_num_queues(netdev);
1369 if (rc)
1370 return rc;
John Allenbd0b6722017-03-17 17:13:40 -05001371
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001372 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1373 if (!adapter->vpd)
1374 return -ENOMEM;
1375
John Allen69d08dc2018-01-18 16:27:58 -06001376 /* Vital Product Data (VPD) */
1377 rc = ibmvnic_get_vpd(adapter);
1378 if (rc) {
1379 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1380 return rc;
1381 }
1382
Nathan Fontenot86f669b2018-02-19 13:30:39 -06001383 rc = init_napi(adapter);
1384 if (rc)
1385 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001386
Lijun Pan69980d02020-09-27 20:13:28 -05001387 send_query_map(adapter);
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -04001388
1389 rc = init_rx_pools(netdev);
1390 if (rc)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001391 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001392
Nathan Fontenotc657e322017-03-30 02:49:06 -04001393 rc = init_tx_pools(netdev);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001394 return rc;
1395}
1396
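/* Bring the device to the OPEN state: replenish the rx pools, enable
 * napi and the sub-CRQ interrupts, raise the logical link, and start
 * the tx queues. On failure, napi and the interrupts are disabled
 * again and the error is returned.
 */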
Nathan Fontenoted651a12017-05-03 14:04:38 -04001397static int __ibmvnic_open(struct net_device *netdev)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001398{
1399 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001400 enum vnic_state prev_state = adapter->state;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001401 int i, rc;
1402
Nathan Fontenot90c80142017-05-03 14:04:32 -04001403 adapter->state = VNIC_OPENING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001404 replenish_pools(adapter);
John Allend944c3d62017-05-26 10:30:13 -04001405 ibmvnic_napi_enable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001406
Thomas Falcon032c5e82015-12-21 11:26:06 -06001407 /* We're ready to receive frames, enable the sub-crq interrupts and
1408 * set the logical link state to up
1409 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001410 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001411 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001412 if (prev_state == VNIC_CLOSED)
1413 enable_irq(adapter->rx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001414 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001415 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001416
Nathan Fontenoted651a12017-05-03 14:04:38 -04001417 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001418 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001419 if (prev_state == VNIC_CLOSED)
1420 enable_irq(adapter->tx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001421 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
Thomas Falcon0d973382020-11-18 19:12:19 -06001422 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
Nathan Fontenoted651a12017-05-03 14:04:38 -04001423 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001424
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001425 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001426 if (rc) {
Lijun Pan0775ebc2021-04-14 02:46:14 -05001427 ibmvnic_napi_disable(adapter);
Sukadev Bhattiprolu61772b02022-02-07 16:19:18 -08001428 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001429 return rc;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001430 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001431
Nathan Fontenoted651a12017-05-03 14:04:38 -04001432 netif_tx_start_all_queues(netdev);
1433
Dany Madden2ca220f2021-06-23 21:13:11 -07001434 if (prev_state == VNIC_CLOSED) {
1435 for (i = 0; i < adapter->req_rx_queues; i++)
1436 napi_schedule(&adapter->napi[i]);
1437 }
1438
Nathan Fontenoted651a12017-05-03 14:04:38 -04001439 adapter->state = VNIC_OPEN;
1440 return rc;
1441}
1442
1443static int ibmvnic_open(struct net_device *netdev)
1444{
1445 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
John Allen69d08dc2018-01-18 16:27:58 -06001446 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001447
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001448 ASSERT_RTNL();
1449
1450 /* If device failover is pending or we are about to reset, just set
1451 * device state and return. Device operation will be handled by reset
1452 * routine.
1453 *
1454 * It should be safe to overwrite the adapter->state here. Since
1455 * we hold the rtnl, either the reset has not actually started or
1456 * the rtnl got dropped during the set_link_state() in do_reset().
1457 * In the former case, no one else is changing the state (again we
1458 * have the rtnl) and in the latter case, do_reset() will detect and
1459 * honor our setting below.
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001460 */
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001461 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
Lijun Pan0666ef72021-04-12 02:41:28 -05001462 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1463 adapter_state_to_string(adapter->state),
1464 adapter->failover_pending);
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001465 adapter->state = VNIC_OPEN;
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001466 rc = 0;
1467 goto out;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001468 }
1469
Nathan Fontenoted651a12017-05-03 14:04:38 -04001470 if (adapter->state != VNIC_CLOSED) {
1471 rc = ibmvnic_login(netdev);
Juliet Kima5681e22018-11-19 15:59:22 -06001472 if (rc)
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001473 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001474
1475 rc = init_resources(adapter);
1476 if (rc) {
1477 netdev_err(netdev, "failed to initialize resources\n");
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001478 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001479 }
1480 }
1481
1482 rc = __ibmvnic_open(netdev);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001483
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001484out:
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001485 /* If open failed and there is a pending failover or in-progress reset,
1486 * set device state and return. Device operation will be handled by
1487 * reset routine. See also comments above regarding rtnl.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001488 */
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001489 if (rc &&
1490 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001491 adapter->state = VNIC_OPEN;
1492 rc = 0;
1493 }
Sukadev Bhattiprolu61772b02022-02-07 16:19:18 -08001494
1495 if (rc) {
1496 release_resources(adapter);
1497 release_rx_pools(adapter);
1498 release_tx_pools(adapter);
1499 }
1500
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001501 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001502}
1503
Thomas Falcond0869c02018-02-13 18:23:43 -06001504static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1505{
1506 struct ibmvnic_rx_pool *rx_pool;
Thomas Falcon637f81d2018-02-26 18:10:57 -06001507 struct ibmvnic_rx_buff *rx_buff;
Thomas Falcond0869c02018-02-13 18:23:43 -06001508 u64 rx_entries;
1509 int rx_scrqs;
1510 int i, j;
1511
1512 if (!adapter->rx_pool)
1513 return;
1514
Thomas Falcon660e3092018-04-20 14:25:32 -05001515 rx_scrqs = adapter->num_active_rx_pools;
Thomas Falcond0869c02018-02-13 18:23:43 -06001516 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1517
1518 /* Free any remaining skbs in the rx buffer pools */
1519 for (i = 0; i < rx_scrqs; i++) {
1520 rx_pool = &adapter->rx_pool[i];
Thomas Falcon637f81d2018-02-26 18:10:57 -06001521 if (!rx_pool || !rx_pool->rx_buff)
Thomas Falcond0869c02018-02-13 18:23:43 -06001522 continue;
1523
1524 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1525 for (j = 0; j < rx_entries; j++) {
Thomas Falcon637f81d2018-02-26 18:10:57 -06001526 rx_buff = &rx_pool->rx_buff[j];
1527 if (rx_buff && rx_buff->skb) {
1528 dev_kfree_skb_any(rx_buff->skb);
1529 rx_buff->skb = NULL;
Thomas Falcond0869c02018-02-13 18:23:43 -06001530 }
1531 }
1532 }
1533}
1534
Thomas Falcone9e1e972018-03-16 20:00:30 -05001535static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1536 struct ibmvnic_tx_pool *tx_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001537{
Thomas Falcon637f81d2018-02-26 18:10:57 -06001538 struct ibmvnic_tx_buff *tx_buff;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001539 u64 tx_entries;
Thomas Falcone9e1e972018-03-16 20:00:30 -05001540 int i;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001541
Dan Carpenter050e85c2018-03-23 14:36:15 +03001542 if (!tx_pool || !tx_pool->tx_buff)
Thomas Falcone9e1e972018-03-16 20:00:30 -05001543 return;
1544
1545 tx_entries = tx_pool->num_buffers;
1546
1547 for (i = 0; i < tx_entries; i++) {
1548 tx_buff = &tx_pool->tx_buff[i];
1549 if (tx_buff && tx_buff->skb) {
1550 dev_kfree_skb_any(tx_buff->skb);
1551 tx_buff->skb = NULL;
1552 }
1553 }
1554}
1555
1556static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1557{
1558 int tx_scrqs;
1559 int i;
1560
1561 if (!adapter->tx_pool || !adapter->tso_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001562 return;
1563
Thomas Falcon660e3092018-04-20 14:25:32 -05001564 tx_scrqs = adapter->num_active_tx_pools;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001565
1566 /* Free any remaining skbs in the tx buffer pools */
1567 for (i = 0; i < tx_scrqs; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001568 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
Thomas Falcone9e1e972018-03-16 20:00:30 -05001569 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1570 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001571 }
1572}
1573
John Allen6095e592018-03-30 13:44:21 -05001574static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
John Allenea5509f2017-03-17 17:13:43 -05001575{
John Allen6095e592018-03-30 13:44:21 -05001576 struct net_device *netdev = adapter->netdev;
John Allenea5509f2017-03-17 17:13:43 -05001577 int i;
1578
Nathan Fontenot46293b92017-05-03 14:05:02 -04001579 if (adapter->tx_scrq) {
1580 for (i = 0; i < adapter->req_tx_queues; i++)
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001581 if (adapter->tx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001582 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001583 "Disabling tx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001584 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001585 disable_irq(adapter->tx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001586 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001587 }
1588
Nathan Fontenot46293b92017-05-03 14:05:02 -04001589 if (adapter->rx_scrq) {
1590 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001591 if (adapter->rx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001592 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001593 "Disabling rx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001594 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001595 disable_irq(adapter->rx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001596 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001597 }
1598 }
John Allen6095e592018-03-30 13:44:21 -05001599}
1600
1601static void ibmvnic_cleanup(struct net_device *netdev)
1602{
1603 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1604
1605 /* ensure that transmissions are stopped if called by do_reset */
Juliet Kim7ed5b312019-09-20 16:11:23 -04001606 if (test_bit(0, &adapter->resetting))
John Allen6095e592018-03-30 13:44:21 -05001607 netif_tx_disable(netdev);
1608 else
1609 netif_tx_stop_all_queues(netdev);
1610
1611 ibmvnic_napi_disable(adapter);
1612 ibmvnic_disable_irqs(adapter);
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001613}
1614
1615static int __ibmvnic_close(struct net_device *netdev)
1616{
1617 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1618 int rc = 0;
1619
1620 adapter->state = VNIC_CLOSING;
1621 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
Nathan Fontenot90c80142017-05-03 14:04:32 -04001622 adapter->state = VNIC_CLOSED;
Sukadev Bhattiprolud4083d32021-02-10 17:41:43 -08001623 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001624}
1625
Nathan Fontenoted651a12017-05-03 14:04:38 -04001626static int ibmvnic_close(struct net_device *netdev)
1627{
1628 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1629 int rc;
1630
Lijun Pan0666ef72021-04-12 02:41:28 -05001631 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1632 adapter_state_to_string(adapter->state),
1633 adapter->failover_pending,
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08001634 adapter->force_reset_recovery);
1635
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001636 /* If device failover is pending, just set device state and return.
1637 * Device operation will be handled by reset routine.
1638 */
1639 if (adapter->failover_pending) {
1640 adapter->state = VNIC_CLOSED;
1641 return 0;
1642 }
1643
Nathan Fontenoted651a12017-05-03 14:04:38 -04001644 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001645 ibmvnic_cleanup(netdev);
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07001646 clean_rx_pools(adapter);
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07001647 clean_tx_pools(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001648
1649 return rc;
1650}
1651
Thomas Falconad7775d2016-04-01 17:20:34 -05001652/**
1653 * build_hdr_data - creates L2/L3/L4 header data buffer
Lee Jones80708602021-01-15 20:09:03 +00001654 * @hdr_field: bitfield determining needed headers
1655 * @skb: socket buffer
1656 * @hdr_len: array of header lengths
1657 * @hdr_data: buffer to write the header to
Thomas Falconad7775d2016-04-01 17:20:34 -05001658 *
1659 * Reads hdr_field to determine which headers are needed by firmware.
1660 * Builds a buffer containing these headers. Saves individual header
1661 * lengths and total buffer length to be used to build descriptors.
1662 */
1663static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1664 int *hdr_len, u8 *hdr_data)
1665{
1666 int len = 0;
1667 u8 *hdr;
1668
Thomas Falconda75e3b2018-03-12 11:51:02 -05001669 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1670 hdr_len[0] = sizeof(struct vlan_ethhdr);
1671 else
1672 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001673
1674 if (skb->protocol == htons(ETH_P_IP)) {
1675 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1676 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1677 hdr_len[2] = tcp_hdrlen(skb);
1678 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1679 hdr_len[2] = sizeof(struct udphdr);
1680 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1681 hdr_len[1] = sizeof(struct ipv6hdr);
1682 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1683 hdr_len[2] = tcp_hdrlen(skb);
1684 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1685 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001686 } else if (skb->protocol == htons(ETH_P_ARP)) {
1687 hdr_len[1] = arp_hdr_len(skb->dev);
1688 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001689 }
1690
1691 memset(hdr_data, 0, 120);
1692 if ((hdr_field >> 6) & 1) {
1693 hdr = skb_mac_header(skb);
1694 memcpy(hdr_data, hdr, hdr_len[0]);
1695 len += hdr_len[0];
1696 }
1697
1698 if ((hdr_field >> 5) & 1) {
1699 hdr = skb_network_header(skb);
1700 memcpy(hdr_data + len, hdr, hdr_len[1]);
1701 len += hdr_len[1];
1702 }
1703
1704 if ((hdr_field >> 4) & 1) {
1705 hdr = skb_transport_header(skb);
1706 memcpy(hdr_data + len, hdr, hdr_len[2]);
1707 len += hdr_len[2];
1708 }
1709 return len;
1710}
1711
1712/**
1713 * create_hdr_descs - create header and header extension descriptors
Lee Jones80708602021-01-15 20:09:03 +00001714 * @hdr_field: bitfield determining needed headers
1715 * @hdr_data: buffer containing header data
1716 * @len: length of data buffer
1717 * @hdr_len: array of individual header lengths
1718 * @scrq_arr: descriptor array
Thomas Falconad7775d2016-04-01 17:20:34 -05001719 *
1720 * Creates header and, if needed, header extension descriptors and
1721 * places them in a descriptor array, scrq_arr
1722 */
1723
Thomas Falcon2de09682017-10-16 10:02:11 -05001724static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1725 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001726{
1727 union sub_crq hdr_desc;
1728 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001729 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001730 u8 *data, *cur;
1731 int tmp;
1732
1733 while (tmp_len > 0) {
1734 cur = hdr_data + len - tmp_len;
1735
1736 memset(&hdr_desc, 0, sizeof(hdr_desc));
1737 if (cur != hdr_data) {
1738 data = hdr_desc.hdr_ext.data;
1739 tmp = tmp_len > 29 ? 29 : tmp_len;
1740 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1741 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1742 hdr_desc.hdr_ext.len = tmp;
1743 } else {
1744 data = hdr_desc.hdr.data;
1745 tmp = tmp_len > 24 ? 24 : tmp_len;
1746 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1747 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1748 hdr_desc.hdr.len = tmp;
1749 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1750 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1751 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1752 hdr_desc.hdr.flag = hdr_field << 1;
1753 }
1754 memcpy(data, cur, tmp);
1755 tmp_len -= tmp;
1756 *scrq_arr = hdr_desc;
1757 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001758 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001759 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001760
1761 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001762}
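/* Header data produced by build_hdr_data() is packed by create_hdr_descs()
 * into one IBMVNIC_HDR_DESC (first 24 bytes) followed by IBMVNIC_HDR_EXT_DESC
 * entries of up to 29 bytes each. For example, a 54-byte Ethernet + IPv4 +
 * TCP header block becomes three descriptors of 24 + 29 + 1 bytes.
 */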
1763
1764/**
1765 * build_hdr_descs_arr - build a header descriptor array
Lijun Pan73214a62021-06-11 10:43:39 -05001766 * @skb: tx socket buffer
1767 * @indir_arr: indirect array
Lee Jones80708602021-01-15 20:09:03 +00001768 * @num_entries: number of descriptors to be sent
1769 * @hdr_field: bit field determining which headers will be sent
Thomas Falconad7775d2016-04-01 17:20:34 -05001770 *
1771 * This function will build a TX descriptor array with applicable
1772 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1773 */
1774
Thomas Falconc62aa372020-11-18 19:12:20 -06001775static void build_hdr_descs_arr(struct sk_buff *skb,
1776 union sub_crq *indir_arr,
Thomas Falconad7775d2016-04-01 17:20:34 -05001777 int *num_entries, u8 hdr_field)
1778{
1779 int hdr_len[3] = {0, 0, 0};
Thomas Falconc62aa372020-11-18 19:12:20 -06001780 u8 hdr_data[140] = {0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001781 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001782
Thomas Falconc62aa372020-11-18 19:12:20 -06001783 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1784 hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001785 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconc62aa372020-11-18 19:12:20 -06001786 indir_arr + 1);
Thomas Falconad7775d2016-04-01 17:20:34 -05001787}
1788
Thomas Falcon1f247a62018-03-12 11:51:04 -05001789static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1790 struct net_device *netdev)
1791{
1792 /* For some backing devices, mishandling of small packets
1793 * can result in a loss of connection or TX stall. Device
1794 * architects recommend that no packet should be smaller
1795 * than the minimum MTU value provided to the driver, so
1796 * pad any packets to that length
1797 */
1798 if (skb->len < netdev->min_mtu)
1799 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001800
1801 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001802}
1803
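/* Undo the descriptors batched in a sub-CRQ's indirect buffer (used when
 * a flush fails): walk the buffer in reverse, return each TX buffer to its
 * pool (the TSO pool if IBMVNIC_TSO_POOL_MASK is set in the correlator),
 * free the skbs, adjust the stats, and wake the subqueue if it had been
 * stopped, no reset is in progress, and usage has dropped to half of
 * req_tx_entries_per_subcrq or below.
 */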
Thomas Falcon0d973382020-11-18 19:12:19 -06001804static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1805 struct ibmvnic_sub_crq_queue *tx_scrq)
1806{
1807 struct ibmvnic_ind_xmit_queue *ind_bufp;
1808 struct ibmvnic_tx_buff *tx_buff;
1809 struct ibmvnic_tx_pool *tx_pool;
1810 union sub_crq tx_scrq_entry;
1811 int queue_num;
1812 int entries;
1813 int index;
1814 int i;
1815
1816 ind_bufp = &tx_scrq->ind_buf;
1817 entries = (u64)ind_bufp->index;
1818 queue_num = tx_scrq->pool_index;
1819
1820 for (i = entries - 1; i >= 0; --i) {
1821 tx_scrq_entry = ind_bufp->indir_arr[i];
1822 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1823 continue;
1824 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1825 if (index & IBMVNIC_TSO_POOL_MASK) {
1826 tx_pool = &adapter->tso_pool[queue_num];
1827 index &= ~IBMVNIC_TSO_POOL_MASK;
1828 } else {
1829 tx_pool = &adapter->tx_pool[queue_num];
1830 }
1831 tx_pool->free_map[tx_pool->consumer_index] = index;
1832 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1833 tx_pool->num_buffers - 1 :
1834 tx_pool->consumer_index - 1;
1835 tx_buff = &tx_pool->tx_buff[index];
1836 adapter->netdev->stats.tx_packets--;
1837 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1838 adapter->tx_stats_buffers[queue_num].packets--;
1839 adapter->tx_stats_buffers[queue_num].bytes -=
1840 tx_buff->skb->len;
1841 dev_kfree_skb_any(tx_buff->skb);
1842 tx_buff->skb = NULL;
1843 adapter->netdev->stats.tx_dropped++;
1844 }
1845 ind_bufp->index = 0;
1846 if (atomic_sub_return(entries, &tx_scrq->used) <=
1847 (adapter->req_tx_entries_per_subcrq / 2) &&
Sukadev Bhattiprolu65d64702021-06-23 21:13:12 -07001848 __netif_subqueue_stopped(adapter->netdev, queue_num) &&
1849 !test_bit(0, &adapter->resetting)) {
Thomas Falcon0d973382020-11-18 19:12:19 -06001850 netif_wake_subqueue(adapter->netdev, queue_num);
1851 netdev_dbg(adapter->netdev, "Started queue %d\n",
1852 queue_num);
1853 }
1854}
1855
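/* Hand any descriptors batched in the indirect buffer to the hypervisor
 * in a single send_subcrq_indirect() call. On failure the batch is
 * unwound with ibmvnic_tx_scrq_clean_buffer(); on success the buffer
 * index is simply reset to zero.
 */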
1856static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1857 struct ibmvnic_sub_crq_queue *tx_scrq)
1858{
1859 struct ibmvnic_ind_xmit_queue *ind_bufp;
1860 u64 dma_addr;
1861 u64 entries;
1862 u64 handle;
1863 int rc;
1864
1865 ind_bufp = &tx_scrq->ind_buf;
1866 dma_addr = (u64)ind_bufp->indir_dma;
1867 entries = (u64)ind_bufp->index;
1868 handle = tx_scrq->handle;
1869
1870 if (!entries)
1871 return 0;
1872 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1873 if (rc)
1874 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1875 else
1876 ind_bufp->index = 0;
1877 return 0;
1878}
1879
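/* Transmit path. The skb is copied into the long-term mapped buffer of
 * the selected tx (or TSO) pool, a v1 TX descriptor is built (plus header
 * descriptors when checksum offload or GSO requires them), and the
 * descriptors are staged in the queue's indirect buffer. The buffer is
 * flushed to firmware when it cannot hold the new entry, when no further
 * packets are pending (xmit_more is false), or when it has reached
 * IBMVNIC_MAX_IND_DESCS. The subqueue is stopped once the used entries
 * reach req_tx_entries_per_subcrq.
 */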
YueHaibing94b2bb22018-09-18 14:35:47 +08001880static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001881{
1882 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1883 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001884 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001885 struct device *dev = &adapter->vdev->dev;
Thomas Falcon0d973382020-11-18 19:12:19 -06001886 struct ibmvnic_ind_xmit_queue *ind_bufp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001887 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001888 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001889 struct ibmvnic_tx_pool *tx_pool;
1890 unsigned int tx_send_failed = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001891 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001892 unsigned int tx_map_failed = 0;
Thomas Falconc62aa372020-11-18 19:12:20 -06001893 union sub_crq indir_arr[16];
Thomas Falcon032c5e82015-12-21 11:26:06 -06001894 unsigned int tx_dropped = 0;
1895 unsigned int tx_packets = 0;
1896 unsigned int tx_bytes = 0;
1897 dma_addr_t data_dma_addr;
1898 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001899 unsigned long lpar_rc;
1900 union sub_crq tx_crq;
1901 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001902 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001903 unsigned char *dst;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001904 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001905 u8 proto = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001906
1907 tx_scrq = adapter->tx_scrq[queue_num];
1908 txq = netdev_get_tx_queue(netdev, queue_num);
1909 ind_bufp = &tx_scrq->ind_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001910
Juliet Kim7ed5b312019-09-20 16:11:23 -04001911 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001912 dev_kfree_skb_any(skb);
1913
Thomas Falcon032c5e82015-12-21 11:26:06 -06001914 tx_send_failed++;
1915 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001916 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001917 goto out;
1918 }
1919
Thomas Falcon7083a452018-03-12 21:05:26 -05001920 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001921 tx_dropped++;
1922 tx_send_failed++;
1923 ret = NETDEV_TX_OK;
Thomas Falcon0d973382020-11-18 19:12:19 -06001924 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon1f247a62018-03-12 11:51:04 -05001925 goto out;
1926 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001927 if (skb_is_gso(skb))
1928 tx_pool = &adapter->tso_pool[queue_num];
1929 else
1930 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001931
Thomas Falcon032c5e82015-12-21 11:26:06 -06001932 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001933
Thomas Falcon86b61a52018-03-16 20:00:29 -05001934 if (index == IBMVNIC_INVALID_MAP) {
1935 dev_kfree_skb_any(skb);
1936 tx_send_failed++;
1937 tx_dropped++;
Sukadev Bhattiprolubb553622021-07-20 19:34:39 -07001938 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon86b61a52018-03-16 20:00:29 -05001939 ret = NETDEV_TX_OK;
1940 goto out;
1941 }
1942
1943 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1944
Thomas Falcon06b3e352018-03-16 20:00:28 -05001945 offset = index * tx_pool->buf_size;
1946 dst = tx_pool->long_term_buff.buff + offset;
1947 memset(dst, 0, tx_pool->buf_size);
1948 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001949
Thomas Falcon15482052017-10-17 12:36:54 -05001950 if (skb_shinfo(skb)->nr_frags) {
1951 int cur, i;
1952
1953 /* Copy the head */
1954 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1955 cur = skb_headlen(skb);
1956
1957 /* Copy the frags */
1958 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1959 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1960
Christophe JAILLETc3105f82021-04-04 10:54:37 +02001961 memcpy(dst + cur, skb_frag_address(frag),
1962 skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001963 cur += skb_frag_size(frag);
1964 }
1965 } else {
1966 skb_copy_from_linear_data(skb, dst, skb->len);
1967 }
1968
Lijun Pan42557da2021-02-12 20:48:40 -06001969	/* post changes to long_term_buff *dst before VIOS accesses it */
1970 dma_wmb();
1971
Thomas Falcon032c5e82015-12-21 11:26:06 -06001972 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001973 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001974
1975 tx_buff = &tx_pool->tx_buff[index];
1976 tx_buff->skb = skb;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001977 tx_buff->index = index;
1978 tx_buff->pool_index = queue_num;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001979
1980 memset(&tx_crq, 0, sizeof(tx_crq));
1981 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1982 tx_crq.v1.type = IBMVNIC_TX_DESC;
1983 tx_crq.v1.n_crq_elem = 1;
1984 tx_crq.v1.n_sge = 1;
1985 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001986
Thomas Falconfdb06102017-10-17 12:36:55 -05001987 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001988 tx_crq.v1.correlator =
1989 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001990 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001991 tx_crq.v1.correlator = cpu_to_be32(index);
1992 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001993 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1994 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1995
Michał Mirosławe84b4792018-11-07 17:50:52 +01001996 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001997 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1998 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1999 }
2000
2001 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06002002 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2003 proto = ip_hdr(skb)->protocol;
2004 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2005 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2006 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002007 }
2008
Thomas Falcona0dca102018-01-18 19:29:48 -06002009 if (proto == IPPROTO_TCP)
2010 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2011 else if (proto == IPPROTO_UDP)
2012 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2013
Thomas Falconad7775d2016-04-01 17:20:34 -05002014 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002015 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05002016 hdrs += 2;
2017 }
Thomas Falconfdb06102017-10-17 12:36:55 -05002018 if (skb_is_gso(skb)) {
2019 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2020 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2021 hdrs += 2;
2022 }
Thomas Falcon0d973382020-11-18 19:12:19 -06002023
2024 if ((*hdrs >> 7) & 1)
Thomas Falconc62aa372020-11-18 19:12:20 -06002025 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
Thomas Falcon0d973382020-11-18 19:12:19 -06002026
2027 tx_crq.v1.n_crq_elem = num_entries;
2028 tx_buff->num_entries = num_entries;
2029	/* flush buffer if current entry cannot fit */
2030 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2031 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2032 if (lpar_rc != H_SUCCESS)
2033 goto tx_flush_err;
Thomas Falconad7775d2016-04-01 17:20:34 -05002034 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04002035
Thomas Falconc62aa372020-11-18 19:12:20 -06002036 indir_arr[0] = tx_crq;
2037 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
Thomas Falcon0d973382020-11-18 19:12:19 -06002038 num_entries * sizeof(struct ibmvnic_generic_scrq));
2039 ind_bufp->index += num_entries;
2040 if (__netdev_tx_sent_queue(txq, skb->len,
2041 netdev_xmit_more() &&
2042 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2043 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2044 if (lpar_rc != H_SUCCESS)
2045 goto tx_err;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002046 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06002047
Thomas Falconffc385b2018-02-18 10:08:41 -06002048 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04002049 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06002050 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06002051 netif_stop_subqueue(netdev, queue_num);
2052 }
2053
Thomas Falcon032c5e82015-12-21 11:26:06 -06002054 tx_packets++;
2055 tx_bytes += skb->len;
Eric Dumazet53378242021-11-16 19:29:22 -08002056 txq_trans_cond_update(txq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002057 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05002058 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002059
Thomas Falcon0d973382020-11-18 19:12:19 -06002060tx_flush_err:
2061 dev_kfree_skb_any(skb);
2062 tx_buff->skb = NULL;
2063 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2064 tx_pool->num_buffers - 1 :
2065 tx_pool->consumer_index - 1;
2066 tx_dropped++;
2067tx_err:
2068 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2069 dev_err_ratelimited(dev, "tx: send failed\n");
2070
2071 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2072 /* Disable TX and report carrier off if queue is closed
2073 * or pending failover.
2074 * Firmware guarantees that a signal will be sent to the
2075 * driver, triggering a reset or some other action.
2076 */
2077 netif_tx_stop_all_queues(netdev);
2078 netif_carrier_off(netdev);
2079 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002080out:
2081 netdev->stats.tx_dropped += tx_dropped;
2082 netdev->stats.tx_bytes += tx_bytes;
2083 netdev->stats.tx_packets += tx_packets;
2084 adapter->tx_send_failed += tx_send_failed;
2085 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05002086 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2087 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2088 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002089
2090 return ret;
2091}
2092
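/* Program the multicast filter with MULTICAST_CTRL CRQs: enable-all for
 * IFF_ALLMULTI, disable-all when the multicast list is empty, otherwise
 * one enable request per address in the list. When IFF_PROMISC is set,
 * the function returns early unless the server reports promisc support.
 */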
2093static void ibmvnic_set_multi(struct net_device *netdev)
2094{
2095 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2096 struct netdev_hw_addr *ha;
2097 union ibmvnic_crq crq;
2098
2099 memset(&crq, 0, sizeof(crq));
2100 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2101 crq.request_capability.cmd = REQUEST_CAPABILITY;
2102
2103 if (netdev->flags & IFF_PROMISC) {
2104 if (!adapter->promisc_supported)
2105 return;
2106 } else {
2107 if (netdev->flags & IFF_ALLMULTI) {
2108 /* Accept all multicast */
2109 memset(&crq, 0, sizeof(crq));
2110 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2111 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2112 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2113 ibmvnic_send_crq(adapter, &crq);
2114 } else if (netdev_mc_empty(netdev)) {
2115 /* Reject all multicast */
2116 memset(&crq, 0, sizeof(crq));
2117 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2118 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2119 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2120 ibmvnic_send_crq(adapter, &crq);
2121 } else {
2122 /* Accept one or more multicast(s) */
2123 netdev_for_each_mc_addr(ha, netdev) {
2124 memset(&crq, 0, sizeof(crq));
2125 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2126 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2127 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2128 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2129 ha->addr);
2130 ibmvnic_send_crq(adapter, &crq);
2131 }
2132 }
2133 }
2134}
2135
Thomas Falcon62740e92019-05-09 23:13:43 -05002136static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002137{
2138 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002139 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002140 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002141
Thomas Falcon62740e92019-05-09 23:13:43 -05002142 if (!is_valid_ether_addr(dev_addr)) {
2143 rc = -EADDRNOTAVAIL;
2144 goto err;
2145 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002146
2147 memset(&crq, 0, sizeof(crq));
2148 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2149 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05002150 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06002151
Thomas Falconff25dcb2019-11-25 17:12:56 -06002152 mutex_lock(&adapter->fw_lock);
2153 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06002154 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06002155
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002156 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05002157 if (rc) {
2158 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06002159 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002160 goto err;
2161 }
2162
Thomas Falcon476d96c2019-11-25 17:12:55 -06002163 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002164 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06002165 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05002166 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06002167 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002168 goto err;
2169 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06002170 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002171 return 0;
2172err:
2173 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2174 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002175}
2176
John Allenc26eba02017-10-26 16:23:25 -05002177static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2178{
2179 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2180 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06002181 int rc;
John Allenc26eba02017-10-26 16:23:25 -05002182
Thomas Falcon62740e92019-05-09 23:13:43 -05002183 rc = 0;
Lijun Pan8fc36722020-10-27 17:04:56 -05002184 if (!is_valid_ether_addr(addr->sa_data))
2185 return -EADDRNOTAVAIL;
2186
Jiri Wiesner67eb2112021-03-04 17:18:28 +01002187 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2188 if (adapter->state != VNIC_PROBED)
Thomas Falcon62740e92019-05-09 23:13:43 -05002189 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
John Allenc26eba02017-10-26 16:23:25 -05002190
Thomas Falconf8136142018-01-29 13:45:05 -06002191 return rc;
John Allenc26eba02017-10-26 16:23:25 -05002192}
2193
Lijun Pancaee7bf2021-04-12 02:41:27 -05002194static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2195{
2196 switch (reason) {
2197 case VNIC_RESET_FAILOVER:
2198 return "FAILOVER";
2199 case VNIC_RESET_MOBILITY:
2200 return "MOBILITY";
2201 case VNIC_RESET_FATAL:
2202 return "FATAL";
2203 case VNIC_RESET_NON_FATAL:
2204 return "NON_FATAL";
2205 case VNIC_RESET_TIMEOUT:
2206 return "TIMEOUT";
2207 case VNIC_RESET_CHANGE_PARAM:
2208 return "CHANGE_PARAM";
Lijun Pan822ebc22021-06-11 10:35:37 -05002209 case VNIC_RESET_PASSIVE_INIT:
2210 return "PASSIVE_INIT";
Lijun Pancaee7bf2021-04-12 02:41:27 -05002211 }
Michal Suchanek07b5dc12021-05-20 08:50:34 +02002212 return "UNKNOWN";
Lijun Pancaee7bf2021-04-12 02:41:27 -05002213}
2214
Lee Jones80708602021-01-15 20:09:03 +00002215/*
Nathan Fontenoted651a12017-05-03 14:04:38 -04002216 * do_reset returns zero if we are able to keep processing reset events, or
2217 * non-zero if we hit a fatal error and must halt.
2218 */
2219static int do_reset(struct ibmvnic_adapter *adapter,
2220 struct ibmvnic_rwi *rwi, u32 reset_state)
2221{
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07002222 struct net_device *netdev = adapter->netdev;
John Allen896d8692018-01-18 16:26:31 -06002223 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002224 u64 old_num_rx_slots, old_num_tx_slots;
Lijun Pand3a6abc2021-04-14 02:46:15 -05002225 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002226
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002227 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002228 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2229 adapter_state_to_string(adapter->state),
2230 adapter->failover_pending,
2231 reset_reason_to_string(rwi->reset_reason),
2232 adapter_state_to_string(reset_state));
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002233
Lijun Pan3f5ec372021-01-06 15:35:14 -06002234 adapter->reset_reason = rwi->reset_reason;
2235 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2236 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2237 rtnl_lock();
2238
Lijun Panbab08be2021-02-11 00:43:19 -06002239 /* Now that we have the rtnl lock, clear any pending failover.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002240 * This will ensure ibmvnic_open() has either completed or will
2241 * block until failover is complete.
2242 */
2243 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2244 adapter->failover_pending = false;
Juliet Kimb27507b2019-09-20 16:11:22 -04002245
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002246 /* read the state and check (again) after getting rtnl */
2247 reset_state = adapter->state;
2248
2249 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2250 rc = -EBUSY;
2251 goto out;
2252 }
2253
Nathan Fontenoted651a12017-05-03 14:04:38 -04002254 netif_carrier_off(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002255
John Allen896d8692018-01-18 16:26:31 -06002256 old_num_rx_queues = adapter->req_rx_queues;
2257 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002258 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2259 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06002260
Nathan Fontenot30f79622018-04-06 18:37:06 -05002261 ibmvnic_cleanup(netdev);
2262
Thomas Falcon1f946082019-06-07 16:03:53 -05002263 if (reset_state == VNIC_OPEN &&
2264 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05002265 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Lijun Pan3f5ec372021-01-06 15:35:14 -06002266 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2267 rc = __ibmvnic_close(netdev);
2268 if (rc)
2269 goto out;
2270 } else {
2271 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002272
Lijun Pan3f5ec372021-01-06 15:35:14 -06002273 /* Release the RTNL lock before link state change and
2274 * re-acquire after the link state change to allow
2275 * linkwatch_event to grab the RTNL lock and run during
2276 * a reset.
2277 */
2278 rtnl_unlock();
2279 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2280 rtnl_lock();
2281 if (rc)
2282 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04002283
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002284 if (adapter->state == VNIC_OPEN) {
2285 /* When we dropped rtnl, ibmvnic_open() got
2286 * it and noticed that we are resetting and
2287 * set the adapter state to OPEN. Update our
2288 * new "target" state, and resume the reset
2289 * from VNIC_CLOSING state.
2290 */
2291 netdev_dbg(netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002292 "Open changed state from %s, updating.\n",
2293 adapter_state_to_string(reset_state));
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002294 reset_state = VNIC_OPEN;
2295 adapter->state = VNIC_CLOSING;
2296 }
2297
Lijun Pan3f5ec372021-01-06 15:35:14 -06002298 if (adapter->state != VNIC_CLOSING) {
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002299 /* If someone else changed the adapter state
2300 * when we dropped the rtnl, fail the reset
2301 */
Dany Maddenb6ee5662021-12-14 00:17:47 -05002302 rc = -EAGAIN;
Lijun Pan3f5ec372021-01-06 15:35:14 -06002303 goto out;
2304 }
Lijun Pan3f5ec372021-01-06 15:35:14 -06002305 adapter->state = VNIC_CLOSED;
Juliet Kimb27507b2019-09-20 16:11:22 -04002306 }
Lijun Pan3f5ec372021-01-06 15:35:14 -06002307 }
Juliet Kimb27507b2019-09-20 16:11:22 -04002308
Lijun Pan3f5ec372021-01-06 15:35:14 -06002309 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2310 release_resources(adapter);
2311 release_sub_crqs(adapter, 1);
2312 release_crq_queue(adapter);
John Allenc26eba02017-10-26 16:23:25 -05002313 }
2314
John Allen8cb31cf2017-05-26 10:30:37 -04002315 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2316 /* remove the closed state so when we call open it appears
2317 * we are coming from the probed state.
2318 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002319 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04002320
Lijun Pan3f5ec372021-01-06 15:35:14 -06002321 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2322 rc = init_crq_queue(adapter);
2323 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002324 rc = ibmvnic_reenable_crq_queue(adapter);
2325 release_sub_crqs(adapter, 1);
2326 } else {
2327 rc = ibmvnic_reset_crq(adapter);
Dany Madden8b40eb732020-06-18 15:24:13 -04002328 if (rc == H_CLOSED || rc == H_SUCCESS) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002329 rc = vio_enable_interrupts(adapter->vdev);
Dany Madden8b40eb732020-06-18 15:24:13 -04002330 if (rc)
2331 netdev_err(adapter->netdev,
2332 "Reset failed to enable interrupts. rc=%d\n",
2333 rc);
2334 }
Nathan Fontenot30f79622018-04-06 18:37:06 -05002335 }
2336
2337 if (rc) {
2338 netdev_err(adapter->netdev,
Dany Madden8b40eb732020-06-18 15:24:13 -04002339 "Reset couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002340 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05002341 }
2342
Lijun Pan635e4422020-08-19 17:52:26 -05002343 rc = ibmvnic_reset_init(adapter, true);
Dany Maddenb6ee5662021-12-14 00:17:47 -05002344 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002345 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04002346
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002347 /* If the adapter was in PROBE or DOWN state prior to the reset,
John Allen8cb31cf2017-05-26 10:30:37 -04002348 * exit here.
2349 */
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002350 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002351 rc = 0;
2352 goto out;
2353 }
John Allen8cb31cf2017-05-26 10:30:37 -04002354
2355 rc = ibmvnic_login(netdev);
Lijun Panf78afaa2021-02-11 00:43:20 -06002356 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002357 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04002358
Lijun Pan3f5ec372021-01-06 15:35:14 -06002359 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2360 rc = init_resources(adapter);
2361 if (rc)
2362 goto out;
2363 } else if (adapter->req_rx_queues != old_num_rx_queues ||
Juliet Kimb27507b2019-09-20 16:11:22 -04002364 adapter->req_tx_queues != old_num_tx_queues ||
2365 adapter->req_rx_add_entries_per_subcrq !=
2366 old_num_rx_slots ||
2367 adapter->req_tx_entries_per_subcrq !=
Mingming Cao9f134572020-08-25 13:26:41 -04002368 old_num_tx_slots ||
2369 !adapter->rx_pool ||
2370 !adapter->tso_pool ||
2371 !adapter->tx_pool) {
Juliet Kima5681e22018-11-19 15:59:22 -06002372 release_napi(adapter);
2373 release_vpd_data(adapter);
2374
2375 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002376 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002377 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002378
John Allenc26eba02017-10-26 16:23:25 -05002379 } else {
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07002380 rc = init_tx_pools(netdev);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002381 if (rc) {
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07002382 netdev_dbg(netdev,
2383 "init tx pools failed (%d)\n",
Lijun Pan91dc5d22021-02-11 00:43:22 -06002384 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002385 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002386 }
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002387
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07002388 rc = init_rx_pools(netdev);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002389 if (rc) {
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07002390 netdev_dbg(netdev,
2391 "init rx pools failed (%d)\n",
Lijun Pan91dc5d22021-02-11 00:43:22 -06002392 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002393 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002394 }
John Allenc26eba02017-10-26 16:23:25 -05002395 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002396 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002397 }
John Allene676d812018-03-14 10:41:29 -05002398 adapter->state = VNIC_CLOSED;
2399
Juliet Kimb27507b2019-09-20 16:11:22 -04002400 if (reset_state == VNIC_CLOSED) {
2401 rc = 0;
2402 goto out;
2403 }
John Allene676d812018-03-14 10:41:29 -05002404
Nathan Fontenoted651a12017-05-03 14:04:38 -04002405 rc = __ibmvnic_open(netdev);
2406 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002407 rc = IBMVNIC_OPEN_FAILED;
2408 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002409 }
2410
Thomas Falconbe32a242019-06-07 16:03:54 -05002411 /* refresh device's multicast list */
2412 ibmvnic_set_multi(netdev);
2413
Lijun Pan98025bc2020-11-20 16:40:12 -06002414 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
Lijun Pan6be46662020-12-14 15:19:29 -06002415 adapter->reset_reason == VNIC_RESET_MOBILITY)
2416 __netdev_notify_peers(netdev);
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002417
Juliet Kimb27507b2019-09-20 16:11:22 -04002418 rc = 0;
2419
2420out:
Dany Madden0cb4bc62020-11-25 18:04:27 -06002421 /* restore the adapter state if reset failed */
2422 if (rc)
2423 adapter->state = reset_state;
Lijun Pan3f5ec372021-01-06 15:35:14 -06002424 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2425 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2426 rtnl_unlock();
Juliet Kimb27507b2019-09-20 16:11:22 -04002427
Lijun Pan0666ef72021-04-12 02:41:28 -05002428 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2429 adapter_state_to_string(adapter->state),
2430 adapter->failover_pending, rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002431 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002432}
2433
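/* Full teardown and re-init path. Unlike do_reset(), this always releases
 * the CRQ, all sub-CRQs, and the driver resources, then repeats the CRQ
 * init, login, resource setup, and open sequence from the probed state.
 */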
Thomas Falcon2770a792018-05-23 13:38:02 -05002434static int do_hard_reset(struct ibmvnic_adapter *adapter,
2435 struct ibmvnic_rwi *rwi, u32 reset_state)
2436{
2437 struct net_device *netdev = adapter->netdev;
2438 int rc;
2439
Lijun Pancaee7bf2021-04-12 02:41:27 -05002440 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2441 reset_reason_to_string(rwi->reset_reason));
Thomas Falcon2770a792018-05-23 13:38:02 -05002442
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002443 /* read the state and check (again) after getting rtnl */
2444 reset_state = adapter->state;
2445
2446 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2447 rc = -EBUSY;
2448 goto out;
2449 }
2450
Thomas Falcon2770a792018-05-23 13:38:02 -05002451 netif_carrier_off(netdev);
2452 adapter->reset_reason = rwi->reset_reason;
2453
2454 ibmvnic_cleanup(netdev);
2455 release_resources(adapter);
2456 release_sub_crqs(adapter, 0);
2457 release_crq_queue(adapter);
2458
2459 /* remove the closed state so when we call open it appears
2460 * we are coming from the probed state.
2461 */
2462 adapter->state = VNIC_PROBED;
2463
Thomas Falconbbd669a2019-04-04 18:58:26 -05002464 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002465 rc = init_crq_queue(adapter);
2466 if (rc) {
2467 netdev_err(adapter->netdev,
2468 "Couldn't initialize crq. rc=%d\n", rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002469 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002470 }
2471
Lijun Pan635e4422020-08-19 17:52:26 -05002472 rc = ibmvnic_reset_init(adapter, false);
Thomas Falcon2770a792018-05-23 13:38:02 -05002473 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002474 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002475
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002476 /* If the adapter was in PROBE or DOWN state prior to the reset,
Thomas Falcon2770a792018-05-23 13:38:02 -05002477 * exit here.
2478 */
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002479 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002480 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002481
2482 rc = ibmvnic_login(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002483 if (rc)
2484 goto out;
Juliet Kima5681e22018-11-19 15:59:22 -06002485
2486 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002487 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002488 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002489
2490 ibmvnic_disable_irqs(adapter);
2491 adapter->state = VNIC_CLOSED;
2492
2493 if (reset_state == VNIC_CLOSED)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002494 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002495
2496 rc = __ibmvnic_open(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002497 if (rc) {
2498 rc = IBMVNIC_OPEN_FAILED;
2499 goto out;
2500 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002501
Lijun Pan6be46662020-12-14 15:19:29 -06002502 __netdev_notify_peers(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002503out:
2504 /* restore adapter state if reset failed */
2505 if (rc)
2506 adapter->state = reset_state;
Lijun Pan0666ef72021-04-12 02:41:28 -05002507 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2508 adapter_state_to_string(adapter->state),
2509 adapter->failover_pending, rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002510 return rc;
Thomas Falcon2770a792018-05-23 13:38:02 -05002511}
2512
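/* Pop the next queued reset work item off adapter->rwi_list under
 * rwi_lock.  Returns NULL if the list is empty; otherwise the caller
 * owns the returned rwi and must free it.
 */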
Nathan Fontenoted651a12017-05-03 14:04:38 -04002513static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2514{
2515 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002516 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002517
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002518 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002519
2520 if (!list_empty(&adapter->rwi_list)) {
2521 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2522 list);
2523 list_del(&rwi->list);
2524 } else {
2525 rwi = NULL;
2526 }
2527
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002528 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002529 return rwi;
2530}
2531
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002532/**
2533 * do_passive_init - complete probing when partner device is detected.
2534 * @adapter: ibmvnic_adapter struct
2535 *
2536 * If the ibmvnic device does not have a partner device to communicate with at boot
2537 * and that partner device comes online at a later time, this function is called
2538 * to complete the initialization process of the ibmvnic device.
2539 * Caller is expected to hold rtnl_lock().
2540 *
2541 * Returns non-zero if sub-CRQs are not initialized properly, leaving the
2542 * device in the down state.
2543 * Returns 0 upon success and the device is in PROBED state.
2544 */
2545
2546static int do_passive_init(struct ibmvnic_adapter *adapter)
2547{
2548 unsigned long timeout = msecs_to_jiffies(30000);
2549 struct net_device *netdev = adapter->netdev;
2550 struct device *dev = &adapter->vdev->dev;
2551 int rc;
2552
2553 netdev_dbg(netdev, "Partner device found, probing.\n");
2554
2555 adapter->state = VNIC_PROBING;
2556 reinit_completion(&adapter->init_done);
2557 adapter->init_done_rc = 0;
2558 adapter->crq.active = true;
2559
2560 rc = send_crq_init_complete(adapter);
2561 if (rc)
2562 goto out;
2563
2564 rc = send_version_xchg(adapter);
2565 if (rc)
2566 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2567
2568 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2569 dev_err(dev, "Initialization sequence timed out\n");
2570 rc = -ETIMEDOUT;
2571 goto out;
2572 }
2573
2574 rc = init_sub_crqs(adapter);
2575 if (rc) {
2576 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2577 goto out;
2578 }
2579
2580 rc = init_sub_crq_irqs(adapter);
2581 if (rc) {
2582 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
2583 goto init_failed;
2584 }
2585
2586 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2587 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2588 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2589
2590 adapter->state = VNIC_PROBED;
2591 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2592
2593 return 0;
2594
2595init_failed:
2596 release_sub_crqs(adapter, 1);
2597out:
2598 adapter->state = VNIC_DOWN;
2599 return rc;
2600}
2601
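/* Reset worker: drain the rwi_list one entry at a time, dispatching each
 * work item to do_passive_init(), do_hard_reset() or do_reset() as
 * appropriate.  If another instance is already processing resets, the
 * work is re-queued on the delayed-reset work item instead.
 */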
Nathan Fontenoted651a12017-05-03 14:04:38 -04002602static void __ibmvnic_reset(struct work_struct *work)
2603{
Nathan Fontenoted651a12017-05-03 14:04:38 -04002604 struct ibmvnic_adapter *adapter;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002605 bool saved_state = false;
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002606 struct ibmvnic_rwi *tmprwi;
2607 struct ibmvnic_rwi *rwi;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002608 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002609 u32 reset_state;
Sukadev Bhattiproludb9f0e82022-01-21 18:59:18 -08002610 int num_fails = 0;
John Allenc26eba02017-10-26 16:23:25 -05002611 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002612
2613 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002614
Juliet Kim7ed5b312019-09-20 16:11:23 -04002615 if (test_and_set_bit_lock(0, &adapter->resetting)) {
Lijun Pan870e04a2021-04-13 14:33:39 -05002616 queue_delayed_work(system_long_wq,
2617 &adapter->ibmvnic_delayed_reset,
2618 IBMVNIC_RESET_DELAY);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002619 return;
2620 }
2621
Nathan Fontenoted651a12017-05-03 14:04:38 -04002622 rwi = get_next_rwi(adapter);
2623 while (rwi) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002624 spin_lock_irqsave(&adapter->state_lock, flags);
2625
Thomas Falcon36f10312019-08-27 11:10:04 -05002626 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002627 adapter->state == VNIC_REMOVED) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002628 spin_unlock_irqrestore(&adapter->state_lock, flags);
Juliet Kim1c2977c2019-09-05 17:30:01 -04002629 kfree(rwi);
2630 rc = EBUSY;
2631 break;
2632 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002633
Juliet Kim7d7195a2020-03-10 09:23:58 -05002634 if (!saved_state) {
2635 reset_state = adapter->state;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002636 saved_state = true;
2637 }
2638 spin_unlock_irqrestore(&adapter->state_lock, flags);
2639
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002640 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2641 rtnl_lock();
2642 rc = do_passive_init(adapter);
2643 rtnl_unlock();
2644 if (!rc)
2645 netif_carrier_on(adapter->netdev);
2646 } else if (adapter->force_reset_recovery) {
Lijun Panbab08be2021-02-11 00:43:19 -06002647 /* Since we are doing a hard reset now, clear the
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002648 * failover_pending flag so we don't ignore any
2649 * future MOBILITY or other resets.
2650 */
2651 adapter->failover_pending = false;
2652
Juliet Kimb27507b2019-09-20 16:11:22 -04002653 /* Transport event occurred during previous reset */
2654 if (adapter->wait_for_reset) {
2655 /* Previous was CHANGE_PARAM; caller locked */
2656 adapter->force_reset_recovery = false;
2657 rc = do_hard_reset(adapter, rwi, reset_state);
2658 } else {
2659 rtnl_lock();
2660 adapter->force_reset_recovery = false;
2661 rc = do_hard_reset(adapter, rwi, reset_state);
2662 rtnl_unlock();
2663 }
Sukadev Bhattiproludb9f0e82022-01-21 18:59:18 -08002664 if (rc)
2665 num_fails++;
2666 else
2667 num_fails = 0;
2668
2669 /* If auto-priority-failover is enabled we can get
2670 * back to back failovers during resets, resulting
2671 * in at least two failed resets (from high-priority
2672 * backing device to low-priority one and then back).
2673 * If resets continue to fail beyond that, give the
2674 * adapter some time to settle down before retrying.
2675 */
2676 if (num_fails >= 3) {
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002677 netdev_dbg(adapter->netdev,
Sukadev Bhattiproludb9f0e82022-01-21 18:59:18 -08002678 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2679 adapter_state_to_string(adapter->state),
2680 num_fails);
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002681 set_current_state(TASK_UNINTERRUPTIBLE);
2682 schedule_timeout(60 * HZ);
2683 }
Lijun Pan1f45dc22020-12-23 14:49:04 -06002684 } else {
Thomas Falcon2770a792018-05-23 13:38:02 -05002685 rc = do_reset(adapter, rwi, reset_state);
2686 }
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002687 tmprwi = rwi;
Dany Maddena86d5c62020-11-25 18:04:31 -06002688 adapter->last_reset_time = jiffies;
Dany Madden0cb4bc62020-11-25 18:04:27 -06002689
Dany Madden18f141b2020-11-25 18:04:25 -06002690 if (rc)
2691 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002692
2693 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002694
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002695 /*
2696 * If there is another reset queued, free the previous rwi
2697 * and process the new reset even if the previous reset failed
2698 * (the previous reset could have failed because of a failover,
2699 * for instance, so process the failover).
2700 *
2701 * If there are no resets queued and the previous reset failed,
2702 * the adapter would be in an undefined state. So retry the
2703 * previous reset as a hard reset.
2704 */
2705 if (rwi)
2706 kfree(tmprwi);
2707 else if (rc)
2708 rwi = tmprwi;
2709
Juliet Kim7ed5b312019-09-20 16:11:23 -04002710 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002711 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
Juliet Kim7ed5b312019-09-20 16:11:23 -04002712 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002713 }
2714
John Allenc26eba02017-10-26 16:23:25 -05002715 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002716 adapter->reset_done_rc = rc;
2717 complete(&adapter->reset_done);
2718 }
2719
Juliet Kim7ed5b312019-09-20 16:11:23 -04002720 clear_bit_unlock(0, &adapter->resetting);
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002721
2722 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002723 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2724 adapter_state_to_string(adapter->state),
2725 adapter->force_reset_recovery,
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002726 adapter->wait_for_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002727}
2728
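/* Handler for the delayed-reset work item: simply re-enters
 * __ibmvnic_reset() once the IBMVNIC_RESET_DELAY back-off has elapsed.
 */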
2729static void __ibmvnic_delayed_reset(struct work_struct *work)
2730{
2731 struct ibmvnic_adapter *adapter;
2732
2733 adapter = container_of(work, struct ibmvnic_adapter,
2734 ibmvnic_delayed_reset.work);
2735 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002736}
2737
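/* Queue a reset work item with the given reason and kick the reset
 * worker; e.g. the TX timeout handler calls
 * ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT).  Duplicate reasons already
 * queued, resets while probing or removing, and non-failover resets
 * while a failover is pending are rejected.  Returns 0 on success or a
 * negative errno.
 */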
Thomas Falconaf894d22018-04-06 18:37:04 -05002738static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2739 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002740{
Thomas Falcon2770a792018-05-23 13:38:02 -05002741 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002742 struct ibmvnic_rwi *rwi, *tmp;
2743 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002744 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002745 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002746
Jakub Kicinskib646acd52021-02-16 22:58:44 -08002747 spin_lock_irqsave(&adapter->rwi_lock, flags);
2748
2749 /* If failover is pending, don't schedule any other reset.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002750 * Instead let the failover complete. If there is already a
2751 * failover reset scheduled, we will detect and drop the
2752 * duplicate reset when walking the ->rwi_list below.
2753 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002754 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002755 adapter->state == VNIC_REMOVED ||
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002756 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002757 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002758 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002759 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002760 }
2761
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002762 if (adapter->state == VNIC_PROBING) {
2763 netdev_warn(netdev, "Adapter reset during probe\n");
Sukadev Bhattiprolu6b278c02021-10-29 15:03:16 -07002764 adapter->init_done_rc = -EAGAIN;
Lijun Pan91dc5d22021-02-11 00:43:22 -06002765 ret = EAGAIN;
Thomas Falconaf894d22018-04-06 18:37:04 -05002766 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002767 }
2768
Wang Hai3e98ae02021-06-10 20:54:17 +08002769 list_for_each_entry(tmp, &adapter->rwi_list, list) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04002770 if (tmp->reset_reason == reason) {
Lijun Pancaee7bf2021-04-12 02:41:27 -05002771 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2772 reset_reason_to_string(reason));
Thomas Falconaf894d22018-04-06 18:37:04 -05002773 ret = EBUSY;
2774 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002775 }
2776 }
2777
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002778 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002779 if (!rwi) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002780 ret = ENOMEM;
2781 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002782 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002783 /* if we just received a transport event,
2784 * flush reset queue and process this reset
2785 */
2786 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2787 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2788 list_del(entry);
2789 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002790 rwi->reset_reason = reason;
2791 list_add_tail(&rwi->list, &adapter->rwi_list);
Lijun Pancaee7bf2021-04-12 02:41:27 -05002792 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2793 reset_reason_to_string(reason));
Lijun Pan870e04a2021-04-13 14:33:39 -05002794 queue_work(system_long_wq, &adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002795
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08002796 ret = 0;
Thomas Falconaf894d22018-04-06 18:37:04 -05002797err:
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08002798 /* ibmvnic_close() below can block, so drop the lock first */
2799 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2800
2801 if (ret == ENOMEM)
2802 ibmvnic_close(netdev);
2803
Thomas Falconaf894d22018-04-06 18:37:04 -05002804 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002805}
2806
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05002807static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002808{
2809 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002810
Lijun Pan855a6312020-11-20 16:40:13 -06002811 if (test_bit(0, &adapter->resetting)) {
2812 netdev_err(adapter->netdev,
2813 "Adapter is resetting, skip timeout reset\n");
2814 return;
2815 }
Dany Maddena86d5c62020-11-25 18:04:31 -06002816 /* Don't queue up a reset until at least 5 seconds (the default
2817 * watchdog timeout) after the last reset.
2818 */
2819 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2820 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2821 return;
2822 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002823 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002824}
2825
2826static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2827 struct ibmvnic_rx_buff *rx_buff)
2828{
2829 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2830
2831 rx_buff->skb = NULL;
2832
2833 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2834 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2835
2836 atomic_dec(&pool->available);
2837}
2838
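/* NAPI poll handler: pull up to "budget" completions off this queue's RX
 * sub-CRQ, copy each frame from its long-term mapped buffer into the
 * skb and pass it up with napi_gro_receive().  The RX pool is
 * replenished when it runs low or when the budget was not exhausted.
 */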
2839static int ibmvnic_poll(struct napi_struct *napi, int budget)
2840{
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002841 struct ibmvnic_sub_crq_queue *rx_scrq;
2842 struct ibmvnic_adapter *adapter;
2843 struct net_device *netdev;
2844 int frames_processed;
2845 int scrq_num;
2846
2847 netdev = napi->dev;
2848 adapter = netdev_priv(netdev);
2849 scrq_num = (int)(napi - adapter->napi);
2850 frames_processed = 0;
2851 rx_scrq = adapter->rx_scrq[scrq_num];
Nathan Fontenot152ce472017-05-26 10:30:54 -04002852
Thomas Falcon032c5e82015-12-21 11:26:06 -06002853restart_poll:
2854 while (frames_processed < budget) {
2855 struct sk_buff *skb;
2856 struct ibmvnic_rx_buff *rx_buff;
2857 union sub_crq *next;
2858 u32 length;
2859 u16 offset;
2860 u8 flags = 0;
2861
Juliet Kim7ed5b312019-09-20 16:11:23 -04002862 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002863 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002864 enable_scrq_irq(adapter, rx_scrq);
Thomas Falcon21ecba62017-06-14 23:50:09 -05002865 napi_complete_done(napi, frames_processed);
2866 return frames_processed;
2867 }
2868
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002869 if (!pending_scrq(adapter, rx_scrq))
Thomas Falcon032c5e82015-12-21 11:26:06 -06002870 break;
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002871 next = ibmvnic_next_scrq(adapter, rx_scrq);
Lijun Pan914789a2021-02-11 00:43:21 -06002872 rx_buff = (struct ibmvnic_rx_buff *)
2873 be64_to_cpu(next->rx_comp.correlator);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002874 /* do error checking */
2875 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002876 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2877 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002878 /* free the entry */
2879 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002880 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002881 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002882 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002883 } else if (!rx_buff->skb) {
2884 /* free the entry */
2885 next->rx_comp.first = 0;
2886 remove_buff_from_pool(adapter, rx_buff);
2887 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002888 }
2889
2890 length = be32_to_cpu(next->rx_comp.len);
2891 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2892 flags = next->rx_comp.flags;
2893 skb = rx_buff->skb;
Lijun Pan42557da2021-02-12 20:48:40 -06002894 /* load long_term_buff before copying to skb */
2895 dma_rmb();
Thomas Falcon032c5e82015-12-21 11:26:06 -06002896 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2897 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002898
2899 /* VLAN Header has been stripped by the system firmware and
2900 * needs to be inserted by the driver
2901 */
2902 if (adapter->rx_vlan_header_insertion &&
2903 (flags & IBMVNIC_VLAN_STRIPPED))
2904 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2905 ntohs(next->rx_comp.vlan_tci));
2906
Thomas Falcon032c5e82015-12-21 11:26:06 -06002907 /* free the entry */
2908 next->rx_comp.first = 0;
2909 remove_buff_from_pool(adapter, rx_buff);
2910
2911 skb_put(skb, length);
2912 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002913 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002914
2915 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2916 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2917 skb->ip_summed = CHECKSUM_UNNECESSARY;
2918 }
2919
2920 length = skb->len;
2921 napi_gro_receive(napi, skb); /* send it up */
2922 netdev->stats.rx_packets++;
2923 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002924 adapter->rx_stats_buffers[scrq_num].packets++;
2925 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002926 frames_processed++;
2927 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002928
Dwip N. Banerjee41ed0a02020-11-18 19:12:25 -06002929 if (adapter->state != VNIC_CLOSING &&
2930 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2931 adapter->req_rx_add_entries_per_subcrq / 2) ||
2932 frames_processed < budget))
Nathan Fontenot152ce472017-05-26 10:30:54 -04002933 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002934 if (frames_processed < budget) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002935 if (napi_complete_done(napi, frames_processed)) {
2936 enable_scrq_irq(adapter, rx_scrq);
2937 if (pending_scrq(adapter, rx_scrq)) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002938 if (napi_reschedule(napi)) {
2939 disable_scrq_irq(adapter, rx_scrq);
2940 goto restart_poll;
2941 }
2942 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002943 }
2944 }
2945 return frames_processed;
2946}
2947
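/* Issue a CHANGE_PARAM reset and block until it completes.  The current
 * MTU, queue and ring settings are saved first; if the reset with the
 * desired settings fails, a second reset is issued with those fallback
 * values restored.
 */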
John Allenc26eba02017-10-26 16:23:25 -05002948static int wait_for_reset(struct ibmvnic_adapter *adapter)
2949{
Thomas Falconaf894d22018-04-06 18:37:04 -05002950 int rc, ret;
2951
John Allenc26eba02017-10-26 16:23:25 -05002952 adapter->fallback.mtu = adapter->req_mtu;
2953 adapter->fallback.rx_queues = adapter->req_rx_queues;
2954 adapter->fallback.tx_queues = adapter->req_tx_queues;
2955 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2956 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2957
Thomas Falcon070eca92019-11-25 17:12:53 -06002958 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002959 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002960 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002961
2962 if (rc) {
2963 ret = rc;
2964 goto out;
2965 }
2966 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2967 if (rc) {
2968 ret = -ENODEV;
2969 goto out;
2970 }
John Allenc26eba02017-10-26 16:23:25 -05002971
Thomas Falconaf894d22018-04-06 18:37:04 -05002972 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002973 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002974 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002975 adapter->desired.mtu = adapter->fallback.mtu;
2976 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2977 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2978 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2979 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2980
Thomas Falcon070eca92019-11-25 17:12:53 -06002981 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002982 adapter->wait_for_reset = true;
2983 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002984 if (rc) {
2985 ret = rc;
2986 goto out;
2987 }
2988 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2989 60000);
2990 if (rc) {
2991 ret = -ENODEV;
2992 goto out;
2993 }
John Allenc26eba02017-10-26 16:23:25 -05002994 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002995out:
John Allenc26eba02017-10-26 16:23:25 -05002996 adapter->wait_for_reset = false;
2997
Thomas Falconaf894d22018-04-06 18:37:04 -05002998 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002999}
3000
John Allen3a807b72017-06-06 16:55:52 -05003001static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
3002{
John Allenc26eba02017-10-26 16:23:25 -05003003 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3004
3005 adapter->desired.mtu = new_mtu + ETH_HLEN;
3006
3007 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05003008}
3009
Thomas Falconf10b09e2018-03-12 11:51:05 -05003010static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3011 struct net_device *dev,
3012 netdev_features_t features)
3013{
3014 /* Some backing hardware adapters cannot
3015 * handle packets with an MSS less than 224
3016 * or with only one segment.
3017 */
3018 if (skb_is_gso(skb)) {
3019 if (skb_shinfo(skb)->gso_size < 224 ||
3020 skb_shinfo(skb)->gso_segs == 1)
3021 features &= ~NETIF_F_GSO_MASK;
3022 }
3023
3024 return features;
3025}
3026
Thomas Falcon032c5e82015-12-21 11:26:06 -06003027static const struct net_device_ops ibmvnic_netdev_ops = {
3028 .ndo_open = ibmvnic_open,
3029 .ndo_stop = ibmvnic_close,
3030 .ndo_start_xmit = ibmvnic_xmit,
3031 .ndo_set_rx_mode = ibmvnic_set_multi,
3032 .ndo_set_mac_address = ibmvnic_set_mac,
3033 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003034 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05003035 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05003036 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003037};
3038
3039/* ethtool functions */
3040
Philippe Reynes8a433792017-01-07 22:37:29 +01003041static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3042 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003043{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03003044 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3045 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01003046
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03003047 rc = send_query_phys_parms(adapter);
3048 if (rc) {
3049 adapter->speed = SPEED_UNKNOWN;
3050 adapter->duplex = DUPLEX_UNKNOWN;
3051 }
3052 cmd->base.speed = adapter->speed;
3053 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01003054 cmd->base.port = PORT_FIBRE;
3055 cmd->base.phy_address = 0;
3056 cmd->base.autoneg = AUTONEG_ENABLE;
3057
Thomas Falcon032c5e82015-12-21 11:26:06 -06003058 return 0;
3059}
3060
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003061static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003062 struct ethtool_drvinfo *info)
3063{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003064 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3065
Lijun Pan8a96c802021-02-11 00:43:25 -06003066 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3067 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3068 strscpy(info->fw_version, adapter->fw_version,
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003069 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003070}
3071
3072static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3073{
3074 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3075
3076 return adapter->msg_enable;
3077}
3078
3079static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3080{
3081 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3082
3083 adapter->msg_enable = data;
3084}
3085
3086static u32 ibmvnic_get_link(struct net_device *netdev)
3087{
3088 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3089
3090 /* Don't need to send a query because we request a logical link up at
3091 * init and then we wait for link state indications
3092 */
3093 return adapter->logical_link_state;
3094}
3095
3096static void ibmvnic_get_ringparam(struct net_device *netdev,
Hao Chen74624942021-11-18 20:12:43 +08003097 struct ethtool_ringparam *ring,
3098 struct kernel_ethtool_ringparam *kernel_ring,
3099 struct netlink_ext_ack *extack)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003100{
John Allenbc131b32017-08-02 16:46:30 -05003101 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3102
Thomas Falcon723ad912018-09-28 18:38:26 -05003103 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3104 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3105 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3106 } else {
3107 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3108 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3109 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003110 ring->rx_mini_max_pending = 0;
3111 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05003112 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3113 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003114 ring->rx_mini_pending = 0;
3115 ring->rx_jumbo_pending = 0;
3116}
3117
John Allenc26eba02017-10-26 16:23:25 -05003118static int ibmvnic_set_ringparam(struct net_device *netdev,
Hao Chen74624942021-11-18 20:12:43 +08003119 struct ethtool_ringparam *ring,
3120 struct kernel_ethtool_ringparam *kernel_ring,
3121 struct netlink_ext_ack *extack)
John Allenc26eba02017-10-26 16:23:25 -05003122{
3123 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05003124 int ret;
John Allenc26eba02017-10-26 16:23:25 -05003125
Thomas Falcon723ad912018-09-28 18:38:26 -05003126 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05003127 adapter->desired.rx_entries = ring->rx_pending;
3128 adapter->desired.tx_entries = ring->tx_pending;
3129
Thomas Falcon723ad912018-09-28 18:38:26 -05003130 ret = wait_for_reset(adapter);
3131
3132 if (!ret &&
3133 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
3134 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
3135 netdev_info(netdev,
3136 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3137 ring->rx_pending, ring->tx_pending,
3138 adapter->req_rx_add_entries_per_subcrq,
3139 adapter->req_tx_entries_per_subcrq);
3140 return ret;
John Allenc26eba02017-10-26 16:23:25 -05003141}
3142
John Allenc2dbeb62017-08-02 16:47:17 -05003143static void ibmvnic_get_channels(struct net_device *netdev,
3144 struct ethtool_channels *channels)
3145{
3146 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3147
Thomas Falcon723ad912018-09-28 18:38:26 -05003148 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3149 channels->max_rx = adapter->max_rx_queues;
3150 channels->max_tx = adapter->max_tx_queues;
3151 } else {
3152 channels->max_rx = IBMVNIC_MAX_QUEUES;
3153 channels->max_tx = IBMVNIC_MAX_QUEUES;
3154 }
3155
John Allenc2dbeb62017-08-02 16:47:17 -05003156 channels->max_other = 0;
3157 channels->max_combined = 0;
3158 channels->rx_count = adapter->req_rx_queues;
3159 channels->tx_count = adapter->req_tx_queues;
3160 channels->other_count = 0;
3161 channels->combined_count = 0;
3162}
3163
John Allenc26eba02017-10-26 16:23:25 -05003164static int ibmvnic_set_channels(struct net_device *netdev,
3165 struct ethtool_channels *channels)
3166{
3167 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05003168 int ret;
John Allenc26eba02017-10-26 16:23:25 -05003169
Thomas Falcon723ad912018-09-28 18:38:26 -05003170 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05003171 adapter->desired.rx_queues = channels->rx_count;
3172 adapter->desired.tx_queues = channels->tx_count;
3173
Thomas Falcon723ad912018-09-28 18:38:26 -05003174 ret = wait_for_reset(adapter);
3175
3176 if (!ret &&
3177 (adapter->req_rx_queues != channels->rx_count ||
3178 adapter->req_tx_queues != channels->tx_count))
3179 netdev_info(netdev,
3180 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3181 channels->rx_count, channels->tx_count,
3182 adapter->req_rx_queues, adapter->req_tx_queues);
3183 return ret;
John Allenc26eba02017-10-26 16:23:25 -05003184}
3185
Thomas Falcon032c5e82015-12-21 11:26:06 -06003186static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3187{
John Allen3d52b592017-08-02 16:44:14 -05003188 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003189 int i;
3190
Thomas Falcon723ad912018-09-28 18:38:26 -05003191 switch (stringset) {
3192 case ETH_SS_STATS:
3193 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3194 i++, data += ETH_GSTRING_LEN)
3195 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3196
3197 for (i = 0; i < adapter->req_tx_queues; i++) {
3198 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3199 data += ETH_GSTRING_LEN;
3200
3201 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3202 data += ETH_GSTRING_LEN;
3203
3204 snprintf(data, ETH_GSTRING_LEN,
3205 "tx%d_dropped_packets", i);
3206 data += ETH_GSTRING_LEN;
3207 }
3208
3209 for (i = 0; i < adapter->req_rx_queues; i++) {
3210 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3211 data += ETH_GSTRING_LEN;
3212
3213 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3214 data += ETH_GSTRING_LEN;
3215
3216 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3217 data += ETH_GSTRING_LEN;
3218 }
3219 break;
3220
3221 case ETH_SS_PRIV_FLAGS:
3222 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3223 strcpy(data + i * ETH_GSTRING_LEN,
3224 ibmvnic_priv_flags[i]);
3225 break;
3226 default:
Thomas Falcon032c5e82015-12-21 11:26:06 -06003227 return;
John Allen3d52b592017-08-02 16:44:14 -05003228 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003229}
3230
3231static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3232{
John Allen3d52b592017-08-02 16:44:14 -05003233 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3234
Thomas Falcon032c5e82015-12-21 11:26:06 -06003235 switch (sset) {
3236 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05003237 return ARRAY_SIZE(ibmvnic_stats) +
3238 adapter->req_tx_queues * NUM_TX_STATS +
3239 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon723ad912018-09-28 18:38:26 -05003240 case ETH_SS_PRIV_FLAGS:
3241 return ARRAY_SIZE(ibmvnic_priv_flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003242 default:
3243 return -EOPNOTSUPP;
3244 }
3245}
3246
3247static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3248 struct ethtool_stats *stats, u64 *data)
3249{
3250 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3251 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05003252 int i, j;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003253 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003254
3255 memset(&crq, 0, sizeof(crq));
3256 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3257 crq.request_statistics.cmd = REQUEST_STATISTICS;
3258 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3259 crq.request_statistics.len =
3260 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003261
3262 /* Wait for data to be written */
Thomas Falcon070eca92019-11-25 17:12:53 -06003263 reinit_completion(&adapter->stats_done);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003264 rc = ibmvnic_send_crq(adapter, &crq);
3265 if (rc)
3266 return;
Thomas Falcon476d96c2019-11-25 17:12:55 -06003267 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3268 if (rc)
3269 return;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003270
3271 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
Lijun Pan91dc5d22021-02-11 00:43:22 -06003272 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3273 (adapter, ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05003274
3275 for (j = 0; j < adapter->req_tx_queues; j++) {
3276 data[i] = adapter->tx_stats_buffers[j].packets;
3277 i++;
3278 data[i] = adapter->tx_stats_buffers[j].bytes;
3279 i++;
3280 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3281 i++;
3282 }
3283
3284 for (j = 0; j < adapter->req_rx_queues; j++) {
3285 data[i] = adapter->rx_stats_buffers[j].packets;
3286 i++;
3287 data[i] = adapter->rx_stats_buffers[j].bytes;
3288 i++;
3289 data[i] = adapter->rx_stats_buffers[j].interrupts;
3290 i++;
3291 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003292}
3293
Thomas Falcon723ad912018-09-28 18:38:26 -05003294static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3295{
3296 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3297
3298 return adapter->priv_flags;
3299}
3300
3301static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3302{
3303 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3304 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3305
3306 if (which_maxes)
3307 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3308 else
3309 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3310
3311 return 0;
3312}
Lijun Pan91dc5d22021-02-11 00:43:22 -06003313
Thomas Falcon032c5e82015-12-21 11:26:06 -06003314static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003315 .get_drvinfo = ibmvnic_get_drvinfo,
3316 .get_msglevel = ibmvnic_get_msglevel,
3317 .set_msglevel = ibmvnic_set_msglevel,
3318 .get_link = ibmvnic_get_link,
3319 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05003320 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05003321 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05003322 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003323 .get_strings = ibmvnic_get_strings,
3324 .get_sset_count = ibmvnic_get_sset_count,
3325 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01003326 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon723ad912018-09-28 18:38:26 -05003327 .get_priv_flags = ibmvnic_get_priv_flags,
3328 .set_priv_flags = ibmvnic_set_priv_flags,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003329};
3330
3331/* Routines for managing CRQs/sCRQs */
3332
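/* Re-register a single sub-CRQ with the hypervisor after a reset: drop
 * its IRQ mapping, zero the message page and bookkeeping, then issue
 * H_REG_SUB_CRQ again for the same buffer.
 */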
Nathan Fontenot57a49432017-05-26 10:31:12 -04003333static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3334 struct ibmvnic_sub_crq_queue *scrq)
3335{
3336 int rc;
3337
Dany Madden9281cf22020-11-25 18:04:26 -06003338 if (!scrq) {
YANG LI862aecb2020-12-30 15:23:14 +08003339 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
Dany Madden9281cf22020-11-25 18:04:26 -06003340 return -EINVAL;
3341 }
3342
Nathan Fontenot57a49432017-05-26 10:31:12 -04003343 if (scrq->irq) {
3344 free_irq(scrq->irq, scrq);
3345 irq_dispose_mapping(scrq->irq);
3346 scrq->irq = 0;
3347 }
3348
Dany Madden9281cf22020-11-25 18:04:26 -06003349 if (scrq->msgs) {
3350 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3351 atomic_set(&scrq->used, 0);
3352 scrq->cur = 0;
Jakub Kicinski55fd59b2020-12-03 15:42:13 -08003353 scrq->ind_buf.index = 0;
Dany Madden9281cf22020-11-25 18:04:26 -06003354 } else {
3355 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3356 return -EINVAL;
3357 }
Nathan Fontenot57a49432017-05-26 10:31:12 -04003358
3359 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3360 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3361 return rc;
3362}
3363
3364static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3365{
3366 int i, rc;
3367
Lijun Pana0faaa22020-11-23 13:35:45 -06003368 if (!adapter->tx_scrq || !adapter->rx_scrq)
3369 return -EINVAL;
3370
Nathan Fontenot57a49432017-05-26 10:31:12 -04003371 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003372 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04003373 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3374 if (rc)
3375 return rc;
3376 }
3377
3378 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003379 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04003380 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3381 if (rc)
3382 return rc;
3383 }
3384
Nathan Fontenot57a49432017-05-26 10:31:12 -04003385 return rc;
3386}
3387
Thomas Falcon032c5e82015-12-21 11:26:06 -06003388static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003389 struct ibmvnic_sub_crq_queue *scrq,
3390 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003391{
3392 struct device *dev = &adapter->vdev->dev;
3393 long rc;
3394
3395 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3396
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003397 if (do_h_free) {
3398 /* Close the sub-crqs */
3399 do {
3400 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3401 adapter->vdev->unit_address,
3402 scrq->crq_num);
3403 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003404
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003405 if (rc) {
3406 netdev_err(adapter->netdev,
3407 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3408 scrq->crq_num, rc);
3409 }
Thomas Falconffa73852017-04-19 13:44:29 -04003410 }
3411
Thomas Falconf019fb62020-11-18 19:12:17 -06003412 dma_free_coherent(dev,
3413 IBMVNIC_IND_ARR_SZ,
3414 scrq->ind_buf.indir_arr,
3415 scrq->ind_buf.indir_dma);
3416
Thomas Falcon032c5e82015-12-21 11:26:06 -06003417 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3418 DMA_BIDIRECTIONAL);
3419 free_pages((unsigned long)scrq->msgs, 2);
3420 kfree(scrq);
3421}
3422
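/* Allocate and register one sub-CRQ: a four-page message queue, DMA
 * mapped and registered with the hypervisor via H_REG_SUB_CRQ, plus a
 * coherent buffer for indirect descriptors.  Returns NULL on failure.
 */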
3423static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3424 *adapter)
3425{
3426 struct device *dev = &adapter->vdev->dev;
3427 struct ibmvnic_sub_crq_queue *scrq;
3428 int rc;
3429
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003430 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003431 if (!scrq)
3432 return NULL;
3433
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04003434 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003435 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003436 if (!scrq->msgs) {
3437 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3438 goto zero_page_failed;
3439 }
3440
3441 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3442 DMA_BIDIRECTIONAL);
3443 if (dma_mapping_error(dev, scrq->msg_token)) {
3444 dev_warn(dev, "Couldn't map crq queue messages page\n");
3445 goto map_failed;
3446 }
3447
3448 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3449 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3450
3451 if (rc == H_RESOURCE)
3452 rc = ibmvnic_reset_crq(adapter);
3453
3454 if (rc == H_CLOSED) {
3455 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3456 } else if (rc) {
3457 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3458 goto reg_failed;
3459 }
3460
Thomas Falcon032c5e82015-12-21 11:26:06 -06003461 scrq->adapter = adapter;
3462 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falconf019fb62020-11-18 19:12:17 -06003463 scrq->ind_buf.index = 0;
3464
3465 scrq->ind_buf.indir_arr =
3466 dma_alloc_coherent(dev,
3467 IBMVNIC_IND_ARR_SZ,
3468 &scrq->ind_buf.indir_dma,
3469 GFP_KERNEL);
3470
3471 if (!scrq->ind_buf.indir_arr)
3472 goto indir_failed;
3473
Thomas Falcon032c5e82015-12-21 11:26:06 -06003474 spin_lock_init(&scrq->lock);
3475
3476 netdev_dbg(adapter->netdev,
3477 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3478 scrq->crq_num, scrq->hw_irq, scrq->irq);
3479
3480 return scrq;
3481
Thomas Falconf019fb62020-11-18 19:12:17 -06003482indir_failed:
3483 do {
3484 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3485 adapter->vdev->unit_address,
3486 scrq->crq_num);
3487 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003488reg_failed:
3489 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3490 DMA_BIDIRECTIONAL);
3491map_failed:
3492 free_pages((unsigned long)scrq->msgs, 2);
3493zero_page_failed:
3494 kfree(scrq);
3495
3496 return NULL;
3497}
3498
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003499static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003500{
3501 int i;
3502
3503 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003504 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003505 if (!adapter->tx_scrq[i])
3506 continue;
3507
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003508 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3509 i);
Sukadev Bhattiprolu65d64702021-06-23 21:13:12 -07003510 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003511 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003512 free_irq(adapter->tx_scrq[i]->irq,
3513 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003514 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003515 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003516 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003517
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003518 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3519 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003520 }
3521
Nathan Fontenot9501df32017-03-15 23:38:07 -04003522 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003523 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003524 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003525 }
3526
3527 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003528 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003529 if (!adapter->rx_scrq[i])
3530 continue;
3531
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003532 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3533 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003534 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003535 free_irq(adapter->rx_scrq[i]->irq,
3536 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003537 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003538 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003539 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003540
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003541 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3542 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003543 }
3544
Nathan Fontenot9501df32017-03-15 23:38:07 -04003545 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003546 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003547 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003548 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003549}
3550
3551static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3552 struct ibmvnic_sub_crq_queue *scrq)
3553{
3554 struct device *dev = &adapter->vdev->dev;
3555 unsigned long rc;
3556
3557 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3558 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3559 if (rc)
3560 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3561 scrq->hw_irq, rc);
3562 return rc;
3563}
3564
3565static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3566 struct ibmvnic_sub_crq_queue *scrq)
3567{
3568 struct device *dev = &adapter->vdev->dev;
3569 unsigned long rc;
3570
3571 if (scrq->hw_irq > 0x100000000ULL) {
3572 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3573 return 1;
3574 }
3575
Juliet Kim7ed5b312019-09-20 16:11:23 -04003576 if (test_bit(0, &adapter->resetting) &&
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003577 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Juliet Kim284f87d2019-11-20 10:50:03 -05003578 u64 val = (0xff000000) | scrq->hw_irq;
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003579
Juliet Kim284f87d2019-11-20 10:50:03 -05003580 rc = plpar_hcall_norets(H_EOI, val);
Juliet Kim2df5c602019-11-20 10:50:04 -05003581 /* H_EOI would fail with rc = H_FUNCTION when running
3582 * in XIVE mode which is expected, but not an error.
3583 */
Sukadev Bhattiprolu154b3b22021-06-23 21:13:16 -07003584 if (rc && (rc != H_FUNCTION))
Juliet Kim284f87d2019-11-20 10:50:03 -05003585 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3586 val, rc);
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003587 }
Thomas Falconf23e0642018-04-15 18:53:36 -05003588
Thomas Falcon032c5e82015-12-21 11:26:06 -06003589 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3590 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3591 if (rc)
3592 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3593 scrq->hw_irq, rc);
3594 return rc;
3595}
3596
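/* Drain TX completions from a sub-CRQ: return finished buffers to their
 * TX pool, free or consume the skbs, update BQL accounting and wake the
 * subqueue once enough descriptors have been reclaimed.
 */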
3597static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3598 struct ibmvnic_sub_crq_queue *scrq)
3599{
3600 struct device *dev = &adapter->vdev->dev;
Thomas Falcon06b3e352018-03-16 20:00:28 -05003601 struct ibmvnic_tx_pool *tx_pool;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003602 struct ibmvnic_tx_buff *txbuff;
Thomas Falcon0d973382020-11-18 19:12:19 -06003603 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003604 union sub_crq *next;
3605 int index;
Thomas Falconc62aa372020-11-18 19:12:20 -06003606 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003607
3608restart_loop:
3609 while (pending_scrq(adapter, scrq)) {
3610 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06003611 int num_entries = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06003612 int total_bytes = 0;
3613 int num_packets = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003614
3615 next = ibmvnic_next_scrq(adapter, scrq);
3616 for (i = 0; i < next->tx_comp.num_comps; i++) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003617 index = be32_to_cpu(next->tx_comp.correlators[i]);
Thomas Falcon06b3e352018-03-16 20:00:28 -05003618 if (index & IBMVNIC_TSO_POOL_MASK) {
3619 tx_pool = &adapter->tso_pool[pool];
3620 index &= ~IBMVNIC_TSO_POOL_MASK;
3621 } else {
3622 tx_pool = &adapter->tx_pool[pool];
3623 }
3624
3625 txbuff = &tx_pool->tx_buff[index];
Thomas Falcon0d973382020-11-18 19:12:19 -06003626 num_packets++;
Thomas Falconffc385b2018-02-18 10:08:41 -06003627 num_entries += txbuff->num_entries;
Thomas Falcon0d973382020-11-18 19:12:19 -06003628 if (txbuff->skb) {
3629 total_bytes += txbuff->skb->len;
Lijun Panca09bf72021-04-13 03:33:25 -05003630 if (next->tx_comp.rcs[i]) {
3631 dev_err(dev, "tx error %x\n",
3632 next->tx_comp.rcs[i]);
3633 dev_kfree_skb_irq(txbuff->skb);
3634 } else {
3635 dev_consume_skb_irq(txbuff->skb);
3636 }
Thomas Falcon0d973382020-11-18 19:12:19 -06003637 txbuff->skb = NULL;
3638 } else {
3639 netdev_warn(adapter->netdev,
3640 "TX completion received with NULL socket buffer\n");
3641 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05003642 tx_pool->free_map[tx_pool->producer_index] = index;
3643 tx_pool->producer_index =
3644 (tx_pool->producer_index + 1) %
3645 tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003646 }
3647 /* remove tx_comp scrq */
3648 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003649
Thomas Falcon0d973382020-11-18 19:12:19 -06003650 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3651 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3652
Thomas Falconffc385b2018-02-18 10:08:41 -06003653 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003654 (adapter->req_tx_entries_per_subcrq / 2) &&
3655 __netif_subqueue_stopped(adapter->netdev,
3656 scrq->pool_index)) {
3657 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06003658 netdev_dbg(adapter->netdev, "Started queue %d\n",
3659 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003660 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003661 }
3662
3663 enable_scrq_irq(adapter, scrq);
3664
3665 if (pending_scrq(adapter, scrq)) {
3666 disable_scrq_irq(adapter, scrq);
3667 goto restart_loop;
3668 }
3669
3670 return 0;
3671}
3672
3673static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3674{
3675 struct ibmvnic_sub_crq_queue *scrq = instance;
3676 struct ibmvnic_adapter *adapter = scrq->adapter;
3677
3678 disable_scrq_irq(adapter, scrq);
3679 ibmvnic_complete_tx(adapter, scrq);
3680
3681 return IRQ_HANDLED;
3682}
3683
3684static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3685{
3686 struct ibmvnic_sub_crq_queue *scrq = instance;
3687 struct ibmvnic_adapter *adapter = scrq->adapter;
3688
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06003689 /* When booting a kdump kernel we can hit pending interrupts
3690 * prior to completing driver initialization.
3691 */
3692 if (unlikely(adapter->state != VNIC_OPEN))
3693 return IRQ_NONE;
3694
John Allen3d52b592017-08-02 16:44:14 -05003695 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3696
Thomas Falcon032c5e82015-12-21 11:26:06 -06003697 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3698 disable_scrq_irq(adapter, scrq);
3699 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3700 }
3701
3702 return IRQ_HANDLED;
3703}
3704
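/* Map and request an interrupt for every TX and RX sub-CRQ.  On failure,
 * free any IRQs acquired so far and release all sub-CRQs.
 */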
Thomas Falconea22d512016-07-06 15:35:17 -05003705static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3706{
3707 struct device *dev = &adapter->vdev->dev;
3708 struct ibmvnic_sub_crq_queue *scrq;
3709 int i = 0, j = 0;
3710 int rc = 0;
3711
3712 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003713 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3714 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003715 scrq = adapter->tx_scrq[i];
3716 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3717
Michael Ellerman99c17902016-09-10 19:59:05 +10003718 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003719 rc = -EINVAL;
3720 dev_err(dev, "Error mapping irq\n");
3721 goto req_tx_irq_failed;
3722 }
3723
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003724 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3725 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003726 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003727 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003728
3729 if (rc) {
3730 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3731 scrq->irq, rc);
3732 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06003733 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05003734 }
3735 }
3736
3737 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003738 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3739 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003740 scrq = adapter->rx_scrq[i];
3741 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10003742 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003743 rc = -EINVAL;
3744 dev_err(dev, "Error mapping irq\n");
3745 goto req_rx_irq_failed;
3746 }
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003747 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3748 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003749 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003750 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003751 if (rc) {
3752 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3753 scrq->irq, rc);
3754 irq_dispose_mapping(scrq->irq);
3755 goto req_rx_irq_failed;
3756 }
3757 }
3758 return rc;
3759
3760req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003761 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003762 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3763 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003764 }
Thomas Falconea22d512016-07-06 15:35:17 -05003765 i = adapter->req_tx_queues;
3766req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003767 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003768 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
Thomas Falcon27a21452020-07-29 16:36:32 -05003769 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003770 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003771 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05003772 return rc;
3773}
3774
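/* Allocate the requested number of TX and RX sub-CRQs.  If some
 * allocations fail but at least the minimum of each type was registered,
 * scale the requested queue counts back to what is available; otherwise
 * release everything and return -ENOMEM.
 */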
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003775static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003776{
3777 struct device *dev = &adapter->vdev->dev;
3778 struct ibmvnic_sub_crq_queue **allqueues;
3779 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003780 int total_queues;
3781 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05003782 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003783
Thomas Falcon032c5e82015-12-21 11:26:06 -06003784 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3785
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003786 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003787 if (!allqueues)
Dany Maddenb6ee5662021-12-14 00:17:47 -05003788 return -ENOMEM;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003789
3790 for (i = 0; i < total_queues; i++) {
3791 allqueues[i] = init_sub_crq_queue(adapter);
3792 if (!allqueues[i]) {
3793 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3794 break;
3795 }
3796 registered_queues++;
3797 }
3798
3799 /* Make sure we were able to register the minimum number of queues */
3800 if (registered_queues <
3801 adapter->min_tx_queues + adapter->min_rx_queues) {
3802 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3803 goto tx_failed;
3804 }
3805
3806	/* Distribute any shortfall in allocated queues between rx and tx */
3807	for (i = 0; i < total_queues - registered_queues + more; i++) {
3808 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3809 switch (i % 3) {
3810 case 0:
3811 if (adapter->req_rx_queues > adapter->min_rx_queues)
3812 adapter->req_rx_queues--;
3813 else
3814 more++;
3815 break;
3816 case 1:
3817 if (adapter->req_tx_queues > adapter->min_tx_queues)
3818 adapter->req_tx_queues--;
3819 else
3820 more++;
3821 break;
3822 }
3823 }
3824
3825 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003826 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003827 if (!adapter->tx_scrq)
3828 goto tx_failed;
3829
3830 for (i = 0; i < adapter->req_tx_queues; i++) {
3831 adapter->tx_scrq[i] = allqueues[i];
3832 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003833 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003834 }
3835
3836 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003837 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003838 if (!adapter->rx_scrq)
3839 goto rx_failed;
3840
3841 for (i = 0; i < adapter->req_rx_queues; i++) {
3842 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3843 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003844 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003845 }
3846
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003847 kfree(allqueues);
3848 return 0;
3849
3850rx_failed:
3851 kfree(adapter->tx_scrq);
3852 adapter->tx_scrq = NULL;
3853tx_failed:
3854 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003855 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003856 kfree(allqueues);
Dany Maddenb6ee5662021-12-14 00:17:47 -05003857 return -ENOMEM;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003858}
3859
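/* Send the REQUEST_CAPABILITY CRQs that commit the negotiated queue,
 * entry and MTU values to the VNIC server. On the first (non-retry)
 * call the requested values are derived from the desired/queried
 * capabilities and clamped so that each long term buffer fits within
 * IBMVNIC_MAX_LTB_SIZE.
 */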
Lijun Pan09081b92020-09-27 20:13:27 -05003860static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003861{
3862 struct device *dev = &adapter->vdev->dev;
3863 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05003864 int max_entries;
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003865 int cap_reqs;
3866
3867 /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
3868 * the PROMISC flag). Initialize this count upfront. When the tasklet
3869 * receives a response to all of these, it will send the next protocol
3870 * message (QUERY_IP_OFFLOAD).
3871 */
3872 if (!(adapter->netdev->flags & IFF_PROMISC) ||
3873 adapter->promisc_supported)
3874 cap_reqs = 7;
3875 else
3876 cap_reqs = 6;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003877
3878 if (!retry) {
3879		/* Sub-CRQ entries are 32 bytes long */
3880 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3881
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003882 atomic_set(&adapter->running_cap_crqs, cap_reqs);
3883
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003884 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3885 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3886 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3887 return;
3888 }
3889
John Allenc26eba02017-10-26 16:23:25 -05003890 if (adapter->desired.mtu)
3891 adapter->req_mtu = adapter->desired.mtu;
3892 else
3893 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003894
John Allenc26eba02017-10-26 16:23:25 -05003895 if (!adapter->desired.tx_entries)
3896 adapter->desired.tx_entries =
3897 adapter->max_tx_entries_per_subcrq;
3898 if (!adapter->desired.rx_entries)
3899 adapter->desired.rx_entries =
3900 adapter->max_rx_add_entries_per_subcrq;
3901
3902 max_entries = IBMVNIC_MAX_LTB_SIZE /
3903 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3904
3905 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3906 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3907 adapter->desired.tx_entries = max_entries;
3908 }
3909
3910 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3911 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3912 adapter->desired.rx_entries = max_entries;
3913 }
3914
3915 if (adapter->desired.tx_entries)
3916 adapter->req_tx_entries_per_subcrq =
3917 adapter->desired.tx_entries;
3918 else
3919 adapter->req_tx_entries_per_subcrq =
3920 adapter->max_tx_entries_per_subcrq;
3921
3922 if (adapter->desired.rx_entries)
3923 adapter->req_rx_add_entries_per_subcrq =
3924 adapter->desired.rx_entries;
3925 else
3926 adapter->req_rx_add_entries_per_subcrq =
3927 adapter->max_rx_add_entries_per_subcrq;
3928
3929 if (adapter->desired.tx_queues)
3930 adapter->req_tx_queues =
3931 adapter->desired.tx_queues;
3932 else
3933 adapter->req_tx_queues =
3934 adapter->opt_tx_comp_sub_queues;
3935
3936 if (adapter->desired.rx_queues)
3937 adapter->req_rx_queues =
3938 adapter->desired.rx_queues;
3939 else
3940 adapter->req_rx_queues =
3941 adapter->opt_rx_comp_queues;
3942
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003943 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003944 } else {
3945 atomic_add(cap_reqs, &adapter->running_cap_crqs);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003946 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003947 memset(&crq, 0, sizeof(crq));
3948 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3949 crq.request_capability.cmd = REQUEST_CAPABILITY;
3950
3951 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003952 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003953 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003954 ibmvnic_send_crq(adapter, &crq);
3955
3956 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003957 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003958 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003959 ibmvnic_send_crq(adapter, &crq);
3960
3961 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003962 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003963 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003964 ibmvnic_send_crq(adapter, &crq);
3965
3966 crq.request_capability.capability =
3967 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3968 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003969 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003970 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003971 ibmvnic_send_crq(adapter, &crq);
3972
3973 crq.request_capability.capability =
3974 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3975 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003976 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003977 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003978 ibmvnic_send_crq(adapter, &crq);
3979
3980 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06003981 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003982 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003983 ibmvnic_send_crq(adapter, &crq);
3984
3985 if (adapter->netdev->flags & IFF_PROMISC) {
3986 if (adapter->promisc_supported) {
3987 crq.request_capability.capability =
3988 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003989 crq.request_capability.number = cpu_to_be64(1);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003990 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003991 ibmvnic_send_crq(adapter, &crq);
3992 }
3993 } else {
3994 crq.request_capability.capability =
3995 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003996 crq.request_capability.number = cpu_to_be64(0);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08003997 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003998 ibmvnic_send_crq(adapter, &crq);
3999 }
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004000
4001 /* Keep at end to catch any discrepancy between expected and actual
4002 * CRQs sent.
4003 */
4004 WARN_ON(cap_reqs != 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004005}
4006
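/* Return non-zero when the descriptor at the sub-CRQ cursor has its
 * valid bit set, i.e. a completion from the server is pending.
 */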
4007static int pending_scrq(struct ibmvnic_adapter *adapter,
4008 struct ibmvnic_sub_crq_queue *scrq)
4009{
4010 union sub_crq *entry = &scrq->msgs[scrq->cur];
Lijun Pan665ab1e2021-01-29 19:19:04 -06004011 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004012
Lijun Pan665ab1e2021-01-29 19:19:04 -06004013 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4014
4015 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4016 * contents of the SCRQ descriptor
4017 */
4018 dma_rmb();
4019
4020 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004021}
4022
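/* Under the queue lock, return the descriptor at the sub-CRQ cursor
 * and advance the cursor if its valid bit is set; otherwise return
 * NULL.
 */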
4023static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4024 struct ibmvnic_sub_crq_queue *scrq)
4025{
4026 union sub_crq *entry;
4027 unsigned long flags;
4028
4029 spin_lock_irqsave(&scrq->lock, flags);
4030 entry = &scrq->msgs[scrq->cur];
4031 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4032 if (++scrq->cur == scrq->size)
4033 scrq->cur = 0;
4034 } else {
4035 entry = NULL;
4036 }
4037 spin_unlock_irqrestore(&scrq->lock, flags);
4038
Lijun Pan665ab1e2021-01-29 19:19:04 -06004039 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4040 * contents of the SCRQ descriptor
Thomas Falconb71ec952020-12-01 09:52:10 -06004041 */
4042 dma_rmb();
4043
Thomas Falcon032c5e82015-12-21 11:26:06 -06004044 return entry;
4045}
4046
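/* Return the entry at the main CRQ cursor and advance the cursor if
 * its valid bit is set; otherwise return NULL.
 */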
4047static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4048{
4049 struct ibmvnic_crq_queue *queue = &adapter->crq;
4050 union ibmvnic_crq *crq;
4051
4052 crq = &queue->msgs[queue->cur];
4053 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4054 if (++queue->cur == queue->size)
4055 queue->cur = 0;
4056 } else {
4057 crq = NULL;
4058 }
4059
4060 return crq;
4061}
4062
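/* Translate common H_SEND_SUB_CRQ* return codes into rate-limited
 * diagnostics.
 */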
Thomas Falcon2d14d372018-07-13 12:03:32 -05004063static void print_subcrq_error(struct device *dev, int rc, const char *func)
4064{
4065 switch (rc) {
4066 case H_PARAMETER:
4067 dev_warn_ratelimited(dev,
4068 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4069 func, rc);
4070 break;
4071 case H_CLOSED:
4072 dev_warn_ratelimited(dev,
4073 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4074 func, rc);
4075 break;
4076 default:
4077 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4078 break;
4079 }
4080}
4081
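/* Hand a block of descriptors to the hypervisor in one
 * H_SEND_SUB_CRQ_INDIRECT call; ioba is the DMA address of the
 * descriptor array and num_entries the number of descriptors in it.
 */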
Thomas Falconad7775d2016-04-01 17:20:34 -05004082static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4083 u64 remote_handle, u64 ioba, u64 num_entries)
4084{
4085 unsigned int ua = adapter->vdev->unit_address;
4086 struct device *dev = &adapter->vdev->dev;
4087 int rc;
4088
4089 /* Make sure the hypervisor sees the complete request */
Lijun Pan1a421562021-02-12 20:36:46 -06004090 dma_wmb();
Thomas Falconad7775d2016-04-01 17:20:34 -05004091 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4092 cpu_to_be64(remote_handle),
4093 ioba, num_entries);
4094
Thomas Falcon2d14d372018-07-13 12:03:32 -05004095 if (rc)
4096 print_subcrq_error(dev, rc, __func__);
Thomas Falconad7775d2016-04-01 17:20:34 -05004097
4098 return rc;
4099}
4100
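/* Post one 16-byte command to the main CRQ via H_SEND_CRQ. Returns
 * -EINVAL if the CRQ is inactive and the command is not the INIT
 * handshake.
 */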
Thomas Falcon032c5e82015-12-21 11:26:06 -06004101static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4102 union ibmvnic_crq *crq)
4103{
4104 unsigned int ua = adapter->vdev->unit_address;
4105 struct device *dev = &adapter->vdev->dev;
4106 u64 *u64_crq = (u64 *)crq;
4107 int rc;
4108
4109 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004110 (unsigned long)cpu_to_be64(u64_crq[0]),
4111 (unsigned long)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004112
Thomas Falcon51536982018-05-23 13:37:56 -05004113 if (!adapter->crq.active &&
4114 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4115 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4116 return -EINVAL;
4117 }
4118
Thomas Falcon032c5e82015-12-21 11:26:06 -06004119 /* Make sure the hypervisor sees the complete request */
Lijun Pan1a421562021-02-12 20:36:46 -06004120 dma_wmb();
Thomas Falcon032c5e82015-12-21 11:26:06 -06004121
4122 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4123 cpu_to_be64(u64_crq[0]),
4124 cpu_to_be64(u64_crq[1]));
4125
4126 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06004127 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06004128 dev_warn(dev, "CRQ Queue closed\n");
Lijun Panfa68bfa2020-08-19 17:52:24 -05004129			/* do not reset, report the failure, wait for passive init from server */
Nathan Fontenotec95dff2018-02-07 13:00:24 -06004130 }
4131
Thomas Falcon032c5e82015-12-21 11:26:06 -06004132 dev_warn(dev, "Send error (rc=%d)\n", rc);
4133 }
4134
4135 return rc;
4136}
4137
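/* Send the IBMVNIC_CRQ_INIT handshake, retrying while the hypervisor
 * reports H_CLOSED (up to 100 attempts, 50 ms apart).
 */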
4138static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4139{
Thomas Falcon36a782f2020-08-31 11:59:57 -05004140 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004141 union ibmvnic_crq crq;
Thomas Falcon36a782f2020-08-31 11:59:57 -05004142 int retries = 100;
4143 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004144
4145 memset(&crq, 0, sizeof(crq));
4146 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4147 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4148 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4149
Thomas Falcon36a782f2020-08-31 11:59:57 -05004150 do {
4151 rc = ibmvnic_send_crq(adapter, &crq);
4152 if (rc != H_CLOSED)
4153 break;
4154 retries--;
4155 msleep(50);
4156
4157 } while (retries > 0);
4158
4159 if (rc) {
4160 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4161 return rc;
4162 }
4163
4164 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004165}
4166
Nathan Fontenot37798d02017-11-08 11:23:56 -06004167struct vnic_login_client_data {
4168 u8 type;
4169 __be16 len;
Kees Cook08ea5562018-04-10 15:26:43 -07004170 char name[];
Nathan Fontenot37798d02017-11-08 11:23:56 -06004171} __packed;
4172
4173static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4174{
4175 int len;
4176
4177 /* Calculate the amount of buffer space needed for the
4178 * vnic client data in the login buffer. There are four entries,
4179 * OS name, LPAR name, device name, and a null last entry.
4180 */
4181 len = 4 * sizeof(struct vnic_login_client_data);
4182 len += 6; /* "Linux" plus NULL */
4183 len += strlen(utsname()->nodename) + 1;
4184 len += strlen(adapter->netdev->name) + 1;
4185
4186 return len;
4187}
4188
4189static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4190 struct vnic_login_client_data *vlcd)
4191{
4192 const char *os_name = "Linux";
4193 int len;
4194
4195 /* Type 1 - LPAR OS */
4196 vlcd->type = 1;
4197 len = strlen(os_name) + 1;
4198 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004199 strscpy(vlcd->name, os_name, len);
Kees Cook08ea5562018-04-10 15:26:43 -07004200 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004201
4202 /* Type 2 - LPAR name */
4203 vlcd->type = 2;
4204 len = strlen(utsname()->nodename) + 1;
4205 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004206 strscpy(vlcd->name, utsname()->nodename, len);
Kees Cook08ea5562018-04-10 15:26:43 -07004207 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004208
4209 /* Type 3 - device name */
4210 vlcd->type = 3;
4211 len = strlen(adapter->netdev->name) + 1;
4212 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004213 strscpy(vlcd->name, adapter->netdev->name, len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004214}
4215
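/* Build and DMA-map the login and login response buffers, fill in the
 * per-queue sub-CRQ numbers and the vNIC client data, and send the
 * LOGIN CRQ. The response is handled asynchronously in
 * handle_login_rsp().
 */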
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004216static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004217{
4218 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4219 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004220 struct device *dev = &adapter->vdev->dev;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004221 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004222 dma_addr_t rsp_buffer_token;
4223 dma_addr_t buffer_token;
4224 size_t rsp_buffer_size;
4225 union ibmvnic_crq crq;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004226 int client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004227 size_t buffer_size;
4228 __be64 *tx_list_p;
4229 __be64 *rx_list_p;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004230 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004231 int i;
4232
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004233 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4234 netdev_err(adapter->netdev,
4235 "RX or TX queues are not allocated, device login failed\n");
Dany Maddenb6ee5662021-12-14 00:17:47 -05004236 return -ENOMEM;
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004237 }
4238
Lijun Pana0c8be52020-12-19 15:39:19 -06004239 release_login_buffer(adapter);
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06004240 release_login_rsp_buffer(adapter);
Lijun Pana0c8be52020-12-19 15:39:19 -06004241
Nathan Fontenot37798d02017-11-08 11:23:56 -06004242 client_data_len = vnic_client_data_len(adapter);
4243
Thomas Falcon032c5e82015-12-21 11:26:06 -06004244 buffer_size =
4245 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06004246 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4247 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004248
Nathan Fontenot37798d02017-11-08 11:23:56 -06004249 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004250 if (!login_buffer)
4251 goto buf_alloc_failed;
4252
4253 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4254 DMA_TO_DEVICE);
4255 if (dma_mapping_error(dev, buffer_token)) {
4256 dev_err(dev, "Couldn't map login buffer\n");
4257 goto buf_map_failed;
4258 }
4259
John Allen498cd8e2016-04-06 11:49:55 -05004260 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4261 sizeof(u64) * adapter->req_tx_queues +
4262 sizeof(u64) * adapter->req_rx_queues +
4263 sizeof(u64) * adapter->req_rx_queues +
4264 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004265
4266 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4267 if (!login_rsp_buffer)
4268 goto buf_rsp_alloc_failed;
4269
4270 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4271 rsp_buffer_size, DMA_FROM_DEVICE);
4272 if (dma_mapping_error(dev, rsp_buffer_token)) {
4273 dev_err(dev, "Couldn't map login rsp buffer\n");
4274 goto buf_rsp_map_failed;
4275 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04004276
Thomas Falcon032c5e82015-12-21 11:26:06 -06004277 adapter->login_buf = login_buffer;
4278 adapter->login_buf_token = buffer_token;
4279 adapter->login_buf_sz = buffer_size;
4280 adapter->login_rsp_buf = login_rsp_buffer;
4281 adapter->login_rsp_buf_token = rsp_buffer_token;
4282 adapter->login_rsp_buf_sz = rsp_buffer_size;
4283
4284 login_buffer->len = cpu_to_be32(buffer_size);
4285 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4286 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4287 login_buffer->off_txcomp_subcrqs =
4288 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4289 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4290 login_buffer->off_rxcomp_subcrqs =
4291 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4292 sizeof(u64) * adapter->req_tx_queues);
4293 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4294 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4295
4296 tx_list_p = (__be64 *)((char *)login_buffer +
4297 sizeof(struct ibmvnic_login_buffer));
4298 rx_list_p = (__be64 *)((char *)login_buffer +
4299 sizeof(struct ibmvnic_login_buffer) +
4300 sizeof(u64) * adapter->req_tx_queues);
4301
4302 for (i = 0; i < adapter->req_tx_queues; i++) {
4303 if (adapter->tx_scrq[i]) {
Lijun Pan914789a2021-02-11 00:43:21 -06004304 tx_list_p[i] =
4305 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004306 }
4307 }
4308
4309 for (i = 0; i < adapter->req_rx_queues; i++) {
4310 if (adapter->rx_scrq[i]) {
Lijun Pan914789a2021-02-11 00:43:21 -06004311 rx_list_p[i] =
4312 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004313 }
4314 }
4315
Nathan Fontenot37798d02017-11-08 11:23:56 -06004316 /* Insert vNIC login client data */
4317 vlcd = (struct vnic_login_client_data *)
4318 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4319 login_buffer->client_data_offset =
4320 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4321 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4322
4323 vnic_add_client_data(adapter, vlcd);
4324
Thomas Falcon032c5e82015-12-21 11:26:06 -06004325 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4326 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4327 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004328 ((unsigned long *)(adapter->login_buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004329 }
4330
4331 memset(&crq, 0, sizeof(crq));
4332 crq.login.first = IBMVNIC_CRQ_CMD;
4333 crq.login.cmd = LOGIN;
4334 crq.login.ioba = cpu_to_be32(buffer_token);
4335 crq.login.len = cpu_to_be32(buffer_size);
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06004336
4337 adapter->login_pending = true;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004338 rc = ibmvnic_send_crq(adapter, &crq);
4339 if (rc) {
4340 adapter->login_pending = false;
4341 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4342 goto buf_rsp_map_failed;
4343 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004344
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004345 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004346
Thomas Falcon032c5e82015-12-21 11:26:06 -06004347buf_rsp_map_failed:
4348 kfree(login_rsp_buffer);
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004349 adapter->login_rsp_buf = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004350buf_rsp_alloc_failed:
4351 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4352buf_map_failed:
4353 kfree(login_buffer);
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004354 adapter->login_buf = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004355buf_alloc_failed:
Dany Maddenb6ee5662021-12-14 00:17:47 -05004356 return -ENOMEM;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004357}
4358
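/* Register (REQUEST_MAP) or release (REQUEST_UNMAP, below) a long term
 * DMA buffer with the server; map_id identifies the buffer and
 * ioba/len describe its DMA region.
 */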
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004359static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4360 u32 len, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004361{
4362 union ibmvnic_crq crq;
4363
4364 memset(&crq, 0, sizeof(crq));
4365 crq.request_map.first = IBMVNIC_CRQ_CMD;
4366 crq.request_map.cmd = REQUEST_MAP;
4367 crq.request_map.map_id = map_id;
4368 crq.request_map.ioba = cpu_to_be32(addr);
4369 crq.request_map.len = cpu_to_be32(len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004370 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004371}
4372
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004373static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004374{
4375 union ibmvnic_crq crq;
4376
4377 memset(&crq, 0, sizeof(crq));
4378 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4379 crq.request_unmap.cmd = REQUEST_UNMAP;
4380 crq.request_unmap.map_id = map_id;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004381 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004382}
4383
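/* Query the server's long term buffer mapping resources (page size,
 * total and free pages); the reply is logged by handle_query_map_rsp().
 */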
Lijun Pan69980d02020-09-27 20:13:28 -05004384static void send_query_map(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004385{
4386 union ibmvnic_crq crq;
4387
4388 memset(&crq, 0, sizeof(crq));
4389 crq.query_map.first = IBMVNIC_CRQ_CMD;
4390 crq.query_map.cmd = QUERY_MAP;
4391 ibmvnic_send_crq(adapter, &crq);
4392}
4393
4394/* Send a series of CRQs requesting various capabilities of the VNIC server */
Lijun Pan491099a2020-09-27 20:13:26 -05004395static void send_query_cap(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004396{
4397 union ibmvnic_crq crq;
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004398 int cap_reqs;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004399
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004400 /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
4401 * upfront. When the tasklet receives a response to all of these, it
4402	 * can send out the next protocol message (REQUEST_CAPABILITY).
4403 */
4404 cap_reqs = 25;
4405
4406 atomic_set(&adapter->running_cap_crqs, cap_reqs);
4407
Thomas Falcon032c5e82015-12-21 11:26:06 -06004408 memset(&crq, 0, sizeof(crq));
4409 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4410 crq.query_capability.cmd = QUERY_CAPABILITY;
4411
4412 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004413 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004414 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004415
4416 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004417 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004418 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004419
4420 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004421 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004422 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004423
4424 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004425 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004426 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004427
4428 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004429 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004430 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004431
4432 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004433 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004434 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004435
4436 crq.query_capability.capability =
4437 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004438 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004439 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004440
4441 crq.query_capability.capability =
4442 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004443 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004444 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004445
4446 crq.query_capability.capability =
4447 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004448 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004449 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004450
4451 crq.query_capability.capability =
4452 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004453 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004454 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004455
4456 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004457 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004458 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004459
4460 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004461 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004462 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004463
4464 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004465 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004466 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004467
4468 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004469 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004470 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004471
4472 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004473 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004474 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004475
4476 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004477 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004478 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004479
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004480 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004481 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004482 cap_reqs--;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004483
Thomas Falcon032c5e82015-12-21 11:26:06 -06004484 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004485 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004486 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004487
4488 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004489 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004490 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004491
4492 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004493 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004494 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004495
4496 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004497 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004498 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004499
4500 crq.query_capability.capability =
4501 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004502 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004503 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004504
4505 crq.query_capability.capability =
4506 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004507 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004508 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004509
4510 crq.query_capability.capability =
4511 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004512 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004513 cap_reqs--;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004514
4515 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004516
Thomas Falcon032c5e82015-12-21 11:26:06 -06004517 ibmvnic_send_crq(adapter, &crq);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004518 cap_reqs--;
4519
4520 /* Keep at end to catch any discrepancy between expected and actual
4521 * CRQs sent.
4522 */
4523 WARN_ON(cap_reqs != 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004524}
4525
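/* Map the IP offload query buffer and ask the server which checksum
 * and TSO offloads it supports; the reply is parsed in
 * handle_query_ip_offload_rsp().
 */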
Lijun Pan16e811f2020-09-27 20:13:29 -05004526static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4527{
4528 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4529 struct device *dev = &adapter->vdev->dev;
4530 union ibmvnic_crq crq;
4531
4532 adapter->ip_offload_tok =
4533 dma_map_single(dev,
4534 &adapter->ip_offload_buf,
4535 buf_sz,
4536 DMA_FROM_DEVICE);
4537
4538 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4539 if (!firmware_has_feature(FW_FEATURE_CMO))
4540 dev_err(dev, "Couldn't map offload buffer\n");
4541 return;
4542 }
4543
4544 memset(&crq, 0, sizeof(crq));
4545 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4546 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4547 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4548 crq.query_ip_offload.ioba =
4549 cpu_to_be32(adapter->ip_offload_tok);
4550
4551 ibmvnic_send_crq(adapter, &crq);
4552}
4553
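/* Enable the offloads reported by the server, update the netdev
 * hw_features/features flags to match, and send CONTROL_IP_OFFLOAD.
 */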
Lijun Pan46899bd2020-09-27 20:13:30 -05004554static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4555{
4556 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4557 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4558 struct device *dev = &adapter->vdev->dev;
4559 netdev_features_t old_hw_features = 0;
4560 union ibmvnic_crq crq;
4561
4562 adapter->ip_offload_ctrl_tok =
4563 dma_map_single(dev,
4564 ctrl_buf,
4565 sizeof(adapter->ip_offload_ctrl),
4566 DMA_TO_DEVICE);
4567
4568 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4569 dev_err(dev, "Couldn't map ip offload control buffer\n");
4570 return;
4571 }
4572
4573 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4574 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4575 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4576 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4577 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4578 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4579 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4580 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4581 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4582 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4583
4584 /* large_rx disabled for now, additional features needed */
4585 ctrl_buf->large_rx_ipv4 = 0;
4586 ctrl_buf->large_rx_ipv6 = 0;
4587
4588 if (adapter->state != VNIC_PROBING) {
4589 old_hw_features = adapter->netdev->hw_features;
4590 adapter->netdev->hw_features = 0;
4591 }
4592
4593 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4594
4595 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4596 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4597
4598 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4599 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4600
4601 if ((adapter->netdev->features &
4602 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4603 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4604
4605 if (buf->large_tx_ipv4)
4606 adapter->netdev->hw_features |= NETIF_F_TSO;
4607 if (buf->large_tx_ipv6)
4608 adapter->netdev->hw_features |= NETIF_F_TSO6;
4609
4610 if (adapter->state == VNIC_PROBING) {
4611 adapter->netdev->features |= adapter->netdev->hw_features;
4612 } else if (old_hw_features != adapter->netdev->hw_features) {
4613 netdev_features_t tmp = 0;
4614
4615 /* disable features no longer supported */
4616 adapter->netdev->features &= adapter->netdev->hw_features;
4617 /* turn on features now supported if previously enabled */
4618 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4619 adapter->netdev->hw_features;
4620 adapter->netdev->features |=
4621 tmp & adapter->netdev->wanted_features;
4622 }
4623
4624 memset(&crq, 0, sizeof(crq));
4625 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4626 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4627 crq.control_ip_offload.len =
4628 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4629 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4630 ibmvnic_send_crq(adapter, &crq);
4631}
4632
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004633static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4634 struct ibmvnic_adapter *adapter)
4635{
4636 struct device *dev = &adapter->vdev->dev;
4637
4638 if (crq->get_vpd_size_rsp.rc.code) {
4639 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4640 crq->get_vpd_size_rsp.rc.code);
4641 complete(&adapter->fw_done);
4642 return;
4643 }
4644
4645 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4646 complete(&adapter->fw_done);
4647}
4648
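/* Extract the firmware level string (following the "RM" keyword) from
 * the VPD buffer into adapter->fw_version, falling back to "N/A".
 */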
4649static void handle_vpd_rsp(union ibmvnic_crq *crq,
4650 struct ibmvnic_adapter *adapter)
4651{
4652 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004653 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004654 u8 fw_level_len = 0;
4655
4656 memset(adapter->fw_version, 0, 32);
4657
4658 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4659 DMA_FROM_DEVICE);
4660
4661 if (crq->get_vpd_rsp.rc.code) {
4662 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4663 crq->get_vpd_rsp.rc.code);
4664 goto complete;
4665 }
4666
4667 /* get the position of the firmware version info
4668 * located after the ASCII 'RM' substring in the buffer
4669 */
4670 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4671 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02004672 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004673 goto complete;
4674 }
4675
4676 /* get length of firmware level ASCII substring */
4677 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4678 fw_level_len = *(substr + 2);
4679 } else {
4680		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
4681 goto complete;
4682 }
4683
4684 /* copy firmware version string from vpd into adapter */
4685 if ((substr + 3 + fw_level_len) <
4686 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004687 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004688 } else {
4689 dev_info(dev, "FW substr extrapolated VPD buff\n");
4690 }
4691
4692complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004693 if (adapter->fw_version[0] == '\0')
Lijun Pan0b217d32021-06-11 13:33:53 -05004694 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004695 complete(&adapter->fw_done);
4696}
4697
Thomas Falcon032c5e82015-12-21 11:26:06 -06004698static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4699{
4700 struct device *dev = &adapter->vdev->dev;
4701 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004702 int i;
4703
4704 dma_unmap_single(dev, adapter->ip_offload_tok,
4705 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4706
4707 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4708 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4709 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004710 ((unsigned long *)(buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004711
4712 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4713 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4714 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4715 buf->tcp_ipv4_chksum);
4716 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4717 buf->tcp_ipv6_chksum);
4718 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4719 buf->udp_ipv4_chksum);
4720 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4721 buf->udp_ipv6_chksum);
4722 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4723 buf->large_tx_ipv4);
4724 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4725 buf->large_tx_ipv6);
4726 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4727 buf->large_rx_ipv4);
4728 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4729 buf->large_rx_ipv6);
4730 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4731 buf->max_ipv4_header_size);
4732 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4733 buf->max_ipv6_header_size);
4734 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4735 buf->max_tcp_header_size);
4736 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4737 buf->max_udp_header_size);
4738 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4739 buf->max_large_tx_size);
4740 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4741 buf->max_large_rx_size);
4742 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4743 buf->ipv6_extension_header);
4744 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4745 buf->tcp_pseudosum_req);
4746 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4747 buf->num_ipv6_ext_headers);
4748 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4749 buf->off_ipv6_ext_headers);
4750
Lijun Pan46899bd2020-09-27 20:13:30 -05004751 send_control_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004752}
4753
Thomas Falconc9008d32018-08-06 21:39:59 -05004754static const char *ibmvnic_fw_err_cause(u16 cause)
4755{
4756 switch (cause) {
4757 case ADAPTER_PROBLEM:
4758 return "adapter problem";
4759 case BUS_PROBLEM:
4760 return "bus problem";
4761 case FW_PROBLEM:
4762 return "firmware problem";
4763 case DD_PROBLEM:
4764 return "device driver problem";
4765 case EEH_RECOVERY:
4766 return "EEH recovery";
4767 case FW_UPDATED:
4768 return "firmware updated";
4769 case LOW_MEMORY:
4770		return "low memory";
4771 default:
4772 return "unknown";
4773 }
4774}
4775
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004776static void handle_error_indication(union ibmvnic_crq *crq,
4777 struct ibmvnic_adapter *adapter)
4778{
4779 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004780 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004781
Thomas Falconc9008d32018-08-06 21:39:59 -05004782 cause = be16_to_cpu(crq->error_indication.error_cause);
4783
4784 dev_warn_ratelimited(dev,
4785 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4786 crq->error_indication.flags
4787 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4788 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004789
Nathan Fontenoted651a12017-05-03 14:04:38 -04004790 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4791 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004792 else
4793 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004794}
4795
Thomas Falconf8136142018-01-29 13:45:05 -06004796static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4797 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004798{
4799 struct net_device *netdev = adapter->netdev;
4800 struct device *dev = &adapter->vdev->dev;
4801 long rc;
4802
4803 rc = crq->change_mac_addr_rsp.rc.code;
4804 if (rc) {
4805 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004806 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004807 }
Lijun Pand9b0e592020-10-20 17:39:19 -05004808 /* crq->change_mac_addr.mac_addr is the requested one
4809 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4810 */
Jakub Kicinskif3956eb2021-10-01 14:32:23 -07004811 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
Lijun Pand9b0e592020-10-20 17:39:19 -05004812 ether_addr_copy(adapter->mac_addr,
4813 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004814out:
4815 complete(&adapter->fw_done);
4816 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004817}
4818
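/* Process a REQUEST_CAPABILITY response. On PARTIALSUCCESS the value
 * suggested by the server is adopted (or the fallback MTU restored)
 * and the requests are resent. Once all outstanding capability
 * responses are in, IP offload support is queried.
 */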
4819static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4820 struct ibmvnic_adapter *adapter)
4821{
4822 struct device *dev = &adapter->vdev->dev;
4823 u64 *req_value;
4824 char *name;
4825
Thomas Falcon901e0402017-02-15 12:17:59 -06004826 atomic_dec(&adapter->running_cap_crqs);
Sukadev Bhattiprolu151b6a5c2022-01-21 18:59:19 -08004827 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
4828 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004829 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4830 case REQ_TX_QUEUES:
4831 req_value = &adapter->req_tx_queues;
4832 name = "tx";
4833 break;
4834 case REQ_RX_QUEUES:
4835 req_value = &adapter->req_rx_queues;
4836 name = "rx";
4837 break;
4838 case REQ_RX_ADD_QUEUES:
4839 req_value = &adapter->req_rx_add_queues;
4840 name = "rx_add";
4841 break;
4842 case REQ_TX_ENTRIES_PER_SUBCRQ:
4843 req_value = &adapter->req_tx_entries_per_subcrq;
4844 name = "tx_entries_per_subcrq";
4845 break;
4846 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4847 req_value = &adapter->req_rx_add_entries_per_subcrq;
4848 name = "rx_add_entries_per_subcrq";
4849 break;
4850 case REQ_MTU:
4851 req_value = &adapter->req_mtu;
4852 name = "mtu";
4853 break;
4854 case PROMISC_REQUESTED:
4855 req_value = &adapter->promisc;
4856 name = "promisc";
4857 break;
4858 default:
4859 dev_err(dev, "Got invalid cap request rsp %d\n",
4860 crq->request_capability.capability);
4861 return;
4862 }
4863
4864 switch (crq->request_capability_rsp.rc.code) {
4865 case SUCCESS:
4866 break;
4867 case PARTIALSUCCESS:
4868 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4869 *req_value,
Lijun Pan914789a2021-02-11 00:43:21 -06004870 (long)be64_to_cpu(crq->request_capability_rsp.number),
4871 name);
John Allene7913802018-01-18 16:27:12 -06004872
4873 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4874 REQ_MTU) {
4875 pr_err("mtu of %llu is not supported. Reverting.\n",
4876 *req_value);
4877 *req_value = adapter->fallback.mtu;
4878 } else {
4879 *req_value =
4880 be64_to_cpu(crq->request_capability_rsp.number);
4881 }
4882
Lijun Pan09081b92020-09-27 20:13:27 -05004883 send_request_cap(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004884 return;
4885 default:
4886 dev_err(dev, "Error %d in request cap rsp\n",
4887 crq->request_capability_rsp.rc.code);
4888 return;
4889 }
4890
4891 /* Done receiving requested capabilities, query IP offload support */
Sukadev Bhattiprolu3a5d9db2022-01-21 18:59:21 -08004892 if (atomic_read(&adapter->running_cap_crqs) == 0)
Lijun Pan16e811f2020-09-27 20:13:29 -05004893 send_query_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004894}
4895
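/* Validate the LOGIN response against the login request, record the
 * sub-CRQ handles and rx buffer size returned by the server, and
 * complete init_done so the login sequence can continue.
 */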
4896static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4897 struct ibmvnic_adapter *adapter)
4898{
4899 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004900 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004901 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4902 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004903 u64 *tx_handle_array;
4904 u64 *rx_handle_array;
4905 int num_tx_pools;
4906 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004907 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004908 int i;
4909
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06004910 /* CHECK: Test/set of login_pending does not need to be atomic
4911 * because only ibmvnic_tasklet tests/clears this.
4912 */
4913 if (!adapter->login_pending) {
4914 netdev_warn(netdev, "Ignoring unexpected login response\n");
4915 return 0;
4916 }
4917 adapter->login_pending = false;
4918
Thomas Falcon032c5e82015-12-21 11:26:06 -06004919 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004920 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004921 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004922 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004923
John Allen498cd8e2016-04-06 11:49:55 -05004924 /* If the number of queues requested can't be allocated by the
4925 * server, the login response will return with code 1. We will need
4926 * to resend the login buffer with fewer queues requested.
4927 */
4928 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004929 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004930 complete(&adapter->init_done);
4931 return 0;
4932 }
4933
Sukadev Bhattiprolud437f5a2021-09-07 22:07:03 -07004934 if (adapter->failover_pending) {
4935 adapter->init_done_rc = -EAGAIN;
4936 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
4937 complete(&adapter->init_done);
4938 /* login response buffer will be released on reset */
4939 return 0;
4940 }
4941
John Allenc26eba02017-10-26 16:23:25 -05004942 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4943
Thomas Falcon032c5e82015-12-21 11:26:06 -06004944 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4945 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4946 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004947 ((unsigned long *)(adapter->login_rsp_buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004948 }
4949
4950 /* Sanity checks */
4951 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4952 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4953 adapter->req_rx_add_queues !=
4954 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4955 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
Dany Madden31d6b402020-11-25 18:04:24 -06004956 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004957 return -EIO;
4958 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004959 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4960 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4961 /* variable buffer sizes are not supported, so just read the
4962 * first entry.
4963 */
4964 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004965
4966 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4967 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4968
4969 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4970 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4971 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4972 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4973
4974 for (i = 0; i < num_tx_pools; i++)
4975 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4976
4977 for (i = 0; i < num_rx_pools; i++)
4978 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4979
Thomas Falcon507ebe62020-08-21 13:39:01 -05004980 adapter->num_active_tx_scrqs = num_tx_pools;
4981 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004982 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004983 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004984 complete(&adapter->init_done);
4985
Thomas Falcon032c5e82015-12-21 11:26:06 -06004986 return 0;
4987}
4988
Thomas Falcon032c5e82015-12-21 11:26:06 -06004989static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4990 struct ibmvnic_adapter *adapter)
4991{
4992 struct device *dev = &adapter->vdev->dev;
4993 long rc;
4994
4995 rc = crq->request_unmap_rsp.rc.code;
4996 if (rc)
4997 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4998}
4999
5000static void handle_query_map_rsp(union ibmvnic_crq *crq,
5001 struct ibmvnic_adapter *adapter)
5002{
5003 struct net_device *netdev = adapter->netdev;
5004 struct device *dev = &adapter->vdev->dev;
5005 long rc;
5006
5007 rc = crq->query_map_rsp.rc.code;
5008 if (rc) {
5009 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5010 return;
5011 }
Sukadev Bhattiprolu0f2bf312021-09-14 20:52:52 -07005012 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5013 crq->query_map_rsp.page_size,
5014 __be32_to_cpu(crq->query_map_rsp.tot_pages),
5015 __be32_to_cpu(crq->query_map_rsp.free_pages));
Thomas Falcon032c5e82015-12-21 11:26:06 -06005016}
5017
5018static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5019 struct ibmvnic_adapter *adapter)
5020{
5021 struct net_device *netdev = adapter->netdev;
5022 struct device *dev = &adapter->vdev->dev;
5023 long rc;
5024
Thomas Falcon901e0402017-02-15 12:17:59 -06005025 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005026 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06005027 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06005028 rc = crq->query_capability.rc.code;
5029 if (rc) {
5030 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5031 goto out;
5032 }
5033
5034 switch (be16_to_cpu(crq->query_capability.capability)) {
5035 case MIN_TX_QUEUES:
5036 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005037 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005038 netdev_dbg(netdev, "min_tx_queues = %lld\n",
5039 adapter->min_tx_queues);
5040 break;
5041 case MIN_RX_QUEUES:
5042 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005043 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005044 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5045 adapter->min_rx_queues);
5046 break;
5047 case MIN_RX_ADD_QUEUES:
5048 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005049 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005050 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5051 adapter->min_rx_add_queues);
5052 break;
5053 case MAX_TX_QUEUES:
5054 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005055 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005056 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5057 adapter->max_tx_queues);
5058 break;
5059 case MAX_RX_QUEUES:
5060 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005061 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005062 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5063 adapter->max_rx_queues);
5064 break;
5065 case MAX_RX_ADD_QUEUES:
5066 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005067 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005068 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5069 adapter->max_rx_add_queues);
5070 break;
5071 case MIN_TX_ENTRIES_PER_SUBCRQ:
5072 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005073 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005074 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5075 adapter->min_tx_entries_per_subcrq);
5076 break;
5077 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5078 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005079 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005080			netdev_dbg(netdev, "min_rx_add_entries_per_subcrq = %lld\n",
5081 adapter->min_rx_add_entries_per_subcrq);
5082 break;
5083 case MAX_TX_ENTRIES_PER_SUBCRQ:
5084 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005085 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005086 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5087 adapter->max_tx_entries_per_subcrq);
5088 break;
5089 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5090 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005091 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005092		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
5093 adapter->max_rx_add_entries_per_subcrq);
5094 break;
5095 case TCP_IP_OFFLOAD:
5096 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06005097 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005098 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5099 adapter->tcp_ip_offload);
5100 break;
5101 case PROMISC_SUPPORTED:
5102 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06005103 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005104 netdev_dbg(netdev, "promisc_supported = %lld\n",
5105 adapter->promisc_supported);
5106 break;
5107 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06005108 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06005109 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005110 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5111 break;
5112 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06005113 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06005114 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005115 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5116 break;
5117 case MAX_MULTICAST_FILTERS:
5118 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06005119 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005120 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5121 adapter->max_multicast_filters);
5122 break;
5123 case VLAN_HEADER_INSERTION:
5124 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06005125 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005126 if (adapter->vlan_header_insertion)
5127 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5128 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5129 adapter->vlan_header_insertion);
5130 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04005131 case RX_VLAN_HEADER_INSERTION:
5132 adapter->rx_vlan_header_insertion =
5133 be64_to_cpu(crq->query_capability.number);
5134 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5135 adapter->rx_vlan_header_insertion);
5136 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005137 case MAX_TX_SG_ENTRIES:
5138 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06005139 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005140 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5141 adapter->max_tx_sg_entries);
5142 break;
5143 case RX_SG_SUPPORTED:
5144 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06005145 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005146 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5147 adapter->rx_sg_supported);
5148 break;
5149 case OPT_TX_COMP_SUB_QUEUES:
5150 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005151 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005152 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5153 adapter->opt_tx_comp_sub_queues);
5154 break;
5155 case OPT_RX_COMP_QUEUES:
5156 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005157 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005158 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5159 adapter->opt_rx_comp_queues);
5160 break;
5161 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5162 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06005163 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005164 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5165 adapter->opt_rx_bufadd_q_per_rx_comp_q);
5166 break;
5167 case OPT_TX_ENTRIES_PER_SUBCRQ:
5168 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005169 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005170 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5171 adapter->opt_tx_entries_per_subcrq);
5172 break;
5173 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5174 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005175 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005176 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5177 adapter->opt_rxba_entries_per_subcrq);
5178 break;
5179 case TX_RX_DESC_REQ:
5180 adapter->tx_rx_desc_req = crq->query_capability.number;
5181 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5182 adapter->tx_rx_desc_req);
5183 break;
5184
5185 default:
5186 netdev_err(netdev, "Got invalid cap rsp %d\n",
 5187			   be16_to_cpu(crq->query_capability.capability));
5188 }
5189
5190out:
Sukadev Bhattiprolu3a5d9db2022-01-21 18:59:21 -08005191 if (atomic_read(&adapter->running_cap_crqs) == 0)
Lijun Pan09081b92020-09-27 20:13:27 -05005192 send_request_cap(adapter, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005193}
5194
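/* Issue a QUERY_PHYS_PARMS command and, holding fw_lock, wait for the
 * response handler to complete fw_done.  -EIO is returned if the response
 * itself reported an error.
 */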
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005195static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5196{
5197 union ibmvnic_crq crq;
5198 int rc;
5199
5200 memset(&crq, 0, sizeof(crq));
5201 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5202 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06005203
5204 mutex_lock(&adapter->fw_lock);
5205 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06005206 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005207
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005208 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005209 if (rc) {
5210 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005211 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06005212 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06005213
5214 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005215 if (rc) {
5216 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06005217 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06005218 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06005219
Thomas Falconff25dcb2019-11-25 17:12:56 -06005220 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005221 return adapter->fw_done_rc ? -EIO : 0;
5222}
5223
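/* Translate the QUERY_PHYS_PARMS response into ethtool SPEED_* and DUPLEX_*
 * values cached in adapter->speed and adapter->duplex.
 */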
5224static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5225 struct ibmvnic_adapter *adapter)
5226{
5227 struct net_device *netdev = adapter->netdev;
5228 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03005229 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005230
5231 rc = crq->query_phys_parms_rsp.rc.code;
5232 if (rc) {
5233 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5234 return rc;
5235 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03005236 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005237 case IBMVNIC_10MBPS:
5238 adapter->speed = SPEED_10;
5239 break;
5240 case IBMVNIC_100MBPS:
5241 adapter->speed = SPEED_100;
5242 break;
5243 case IBMVNIC_1GBPS:
5244 adapter->speed = SPEED_1000;
5245 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05005246 case IBMVNIC_10GBPS:
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005247 adapter->speed = SPEED_10000;
5248 break;
5249 case IBMVNIC_25GBPS:
5250 adapter->speed = SPEED_25000;
5251 break;
5252 case IBMVNIC_40GBPS:
5253 adapter->speed = SPEED_40000;
5254 break;
5255 case IBMVNIC_50GBPS:
5256 adapter->speed = SPEED_50000;
5257 break;
5258 case IBMVNIC_100GBPS:
5259 adapter->speed = SPEED_100000;
5260 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05005261 case IBMVNIC_200GBPS:
5262 adapter->speed = SPEED_200000;
5263 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005264 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03005265 if (netif_carrier_ok(netdev))
5266 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005267 adapter->speed = SPEED_UNKNOWN;
5268 }
5269 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5270 adapter->duplex = DUPLEX_FULL;
5271 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5272 adapter->duplex = DUPLEX_HALF;
5273 else
5274 adapter->duplex = DUPLEX_UNKNOWN;
5275
5276 return rc;
5277}
5278
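/* Main CRQ dispatcher.  Transport events (partner init, failover, partition
 * migration) are handled first; everything else is a command response that
 * is routed to its handler or completes a waiting thread.
 */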
Thomas Falcon032c5e82015-12-21 11:26:06 -06005279static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5280 struct ibmvnic_adapter *adapter)
5281{
5282 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5283 struct net_device *netdev = adapter->netdev;
5284 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04005285 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005286 long rc;
5287
5288 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06005289 (unsigned long)cpu_to_be64(u64_crq[0]),
5290 (unsigned long)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06005291 switch (gen_crq->first) {
5292 case IBMVNIC_CRQ_INIT_RSP:
5293 switch (gen_crq->cmd) {
5294 case IBMVNIC_CRQ_INIT:
5295 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04005296 adapter->from_passive_init = true;
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06005297 /* Discard any stale login responses from prev reset.
5298 * CHECK: should we clear even on INIT_COMPLETE?
5299 */
5300 adapter->login_pending = false;
5301
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06005302 if (adapter->state == VNIC_DOWN)
5303 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5304 else
5305 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5306
Sukadev Bhattiproluef66a1e2021-02-02 21:08:02 -08005307 if (rc && rc != -EBUSY) {
5308 /* We were unable to schedule the failover
5309 * reset either because the adapter was still
 5310			 * probing (e.g. during kexec) or we could not
5311 * allocate memory. Clear the failover_pending
5312 * flag since no one else will. We ignore
5313 * EBUSY because it means either FAILOVER reset
5314 * is already scheduled or the adapter is
5315 * being removed.
5316 */
5317 netdev_err(netdev,
5318 "Error %ld scheduling failover reset\n",
5319 rc);
5320 adapter->failover_pending = false;
5321 }
Sukadev Bhattiprolu6b278c02021-10-29 15:03:16 -07005322
5323 if (!completion_done(&adapter->init_done)) {
5324 complete(&adapter->init_done);
5325 if (!adapter->init_done_rc)
5326 adapter->init_done_rc = -EAGAIN;
5327 }
5328
Thomas Falcon032c5e82015-12-21 11:26:06 -06005329 break;
5330 case IBMVNIC_CRQ_INIT_COMPLETE:
5331 dev_info(dev, "Partner initialization complete\n");
Thomas Falcon51536982018-05-23 13:37:56 -05005332 adapter->crq.active = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005333 send_version_xchg(adapter);
5334 break;
5335 default:
5336 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5337 }
5338 return;
5339 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04005340 netif_carrier_off(netdev);
Thomas Falcon51536982018-05-23 13:37:56 -05005341 adapter->crq.active = false;
Thomas Falcon2147e3d2019-11-25 17:12:54 -06005342 /* terminate any thread waiting for a response
5343 * from the device
5344 */
5345 if (!completion_done(&adapter->fw_done)) {
5346 adapter->fw_done_rc = -EIO;
5347 complete(&adapter->fw_done);
5348 }
5349 if (!completion_done(&adapter->stats_done))
5350 complete(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005351 if (test_bit(0, &adapter->resetting))
Thomas Falcon2770a792018-05-23 13:38:02 -05005352 adapter->force_reset_recovery = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005353 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04005354 dev_info(dev, "Migrated, re-enabling adapter\n");
5355 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05005356 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5357 dev_info(dev, "Backing device failover detected\n");
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05005358 adapter->failover_pending = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005359 } else {
5360 /* The adapter lost the connection */
5361 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5362 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005363 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005364 }
5365 return;
5366 case IBMVNIC_CRQ_CMD_RSP:
5367 break;
5368 default:
5369 dev_err(dev, "Got an invalid msg type 0x%02x\n",
5370 gen_crq->first);
5371 return;
5372 }
5373
5374 switch (gen_crq->cmd) {
5375 case VERSION_EXCHANGE_RSP:
5376 rc = crq->version_exchange_rsp.rc.code;
5377 if (rc) {
5378 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
5379 break;
5380 }
Thomas Falcon78468892020-05-28 11:19:17 -05005381 ibmvnic_version =
Thomas Falcon032c5e82015-12-21 11:26:06 -06005382 be16_to_cpu(crq->version_exchange_rsp.version);
Thomas Falcon78468892020-05-28 11:19:17 -05005383 dev_info(dev, "Partner protocol version is %d\n",
5384 ibmvnic_version);
Lijun Pan491099a2020-09-27 20:13:26 -05005385 send_query_cap(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005386 break;
5387 case QUERY_CAPABILITY_RSP:
5388 handle_query_cap_rsp(crq, adapter);
5389 break;
5390 case QUERY_MAP_RSP:
5391 handle_query_map_rsp(crq, adapter);
5392 break;
5393 case REQUEST_MAP_RSP:
Thomas Falconf3be0cb2017-06-21 14:53:01 -05005394 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5395 complete(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005396 break;
5397 case REQUEST_UNMAP_RSP:
5398 handle_request_unmap_rsp(crq, adapter);
5399 break;
5400 case REQUEST_CAPABILITY_RSP:
5401 handle_request_cap_rsp(crq, adapter);
5402 break;
5403 case LOGIN_RSP:
5404 netdev_dbg(netdev, "Got Login Response\n");
5405 handle_login_rsp(crq, adapter);
5406 break;
5407 case LOGICAL_LINK_STATE_RSP:
Nathan Fontenot53da09e2017-04-21 15:39:04 -04005408 netdev_dbg(netdev,
5409 "Got Logical Link State Response, state: %d rc: %d\n",
5410 crq->logical_link_state_rsp.link_state,
5411 crq->logical_link_state_rsp.rc.code);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005412 adapter->logical_link_state =
5413 crq->logical_link_state_rsp.link_state;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04005414 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5415 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005416 break;
5417 case LINK_STATE_INDICATION:
5418 netdev_dbg(netdev, "Got Logical Link State Indication\n");
5419 adapter->phys_link_state =
5420 crq->link_state_indication.phys_link_state;
5421 adapter->logical_link_state =
5422 crq->link_state_indication.logical_link_state;
Thomas Falcon0655f992019-05-09 23:13:44 -05005423 if (adapter->phys_link_state && adapter->logical_link_state)
5424 netif_carrier_on(netdev);
5425 else
5426 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005427 break;
5428 case CHANGE_MAC_ADDR_RSP:
5429 netdev_dbg(netdev, "Got MAC address change Response\n");
Thomas Falconf8136142018-01-29 13:45:05 -06005430 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005431 break;
5432 case ERROR_INDICATION:
5433 netdev_dbg(netdev, "Got Error Indication\n");
5434 handle_error_indication(crq, adapter);
5435 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005436 case REQUEST_STATISTICS_RSP:
5437 netdev_dbg(netdev, "Got Statistics Response\n");
5438 complete(&adapter->stats_done);
5439 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005440 case QUERY_IP_OFFLOAD_RSP:
5441 netdev_dbg(netdev, "Got Query IP offload Response\n");
5442 handle_query_ip_offload_rsp(adapter);
5443 break;
5444 case MULTICAST_CTRL_RSP:
5445 netdev_dbg(netdev, "Got multicast control Response\n");
5446 break;
5447 case CONTROL_IP_OFFLOAD_RSP:
5448 netdev_dbg(netdev, "Got Control IP offload Response\n");
5449 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
5450 sizeof(adapter->ip_offload_ctrl),
5451 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05005452 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005453 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005454 case COLLECT_FW_TRACE_RSP:
5455 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5456 complete(&adapter->fw_done);
5457 break;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02005458 case GET_VPD_SIZE_RSP:
5459 handle_vpd_size_rsp(crq, adapter);
5460 break;
5461 case GET_VPD_RSP:
5462 handle_vpd_rsp(crq, adapter);
5463 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005464 case QUERY_PHYS_PARMS_RSP:
5465 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5466 complete(&adapter->fw_done);
5467 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005468 default:
5469 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
5470 gen_crq->cmd);
5471 }
5472}
5473
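/* CRQ interrupt handler: defer all processing to the tasklet. */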
5474static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
5475{
5476 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06005477
Thomas Falcon6c267b32017-02-15 12:17:58 -06005478 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005479 return IRQ_HANDLED;
5480}
5481
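/* Drain the CRQ under queue->lock, handing each valid descriptor to
 * ibmvnic_handle_crq() and then marking it free for reuse.
 */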
Allen Paisaa7c3fe2020-09-14 12:59:29 +05305482static void ibmvnic_tasklet(struct tasklet_struct *t)
Thomas Falcon6c267b32017-02-15 12:17:58 -06005483{
Allen Paisaa7c3fe2020-09-14 12:59:29 +05305484 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005485 struct ibmvnic_crq_queue *queue = &adapter->crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005486 union ibmvnic_crq *crq;
5487 unsigned long flags;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005488
5489 spin_lock_irqsave(&queue->lock, flags);
Sukadev Bhattiprolu3a5d9db2022-01-21 18:59:21 -08005490
5491 /* Pull all the valid messages off the CRQ */
5492 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
5493 /* This barrier makes sure ibmvnic_next_crq()'s
5494 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
5495 * before ibmvnic_handle_crq()'s
5496 * switch(gen_crq->first) and switch(gen_crq->cmd).
5497 */
5498 dma_rmb();
5499 ibmvnic_handle_crq(crq, adapter);
5500 crq->generic.first = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005501 }
Sukadev Bhattiprolu3a5d9db2022-01-21 18:59:21 -08005502
Thomas Falcon032c5e82015-12-21 11:26:06 -06005503 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005504}
5505
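/* Ask the hypervisor to re-enable the CRQ, retrying while it reports busy. */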
5506static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5507{
5508 struct vio_dev *vdev = adapter->vdev;
5509 int rc;
5510
5511 do {
5512 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5513 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5514
5515 if (rc)
5516 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5517
5518 return rc;
5519}
5520
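/* Close and re-register the CRQ with the hypervisor, discarding any
 * messages still queued in the page.
 */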
5521static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5522{
5523 struct ibmvnic_crq_queue *crq = &adapter->crq;
5524 struct device *dev = &adapter->vdev->dev;
5525 struct vio_dev *vdev = adapter->vdev;
5526 int rc;
5527
5528 /* Close the CRQ */
5529 do {
5530 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5531 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5532
5533 /* Clean out the queue */
Lijun Pan0e435be2020-11-23 13:35:46 -06005534 if (!crq->msgs)
5535 return -EINVAL;
5536
Thomas Falcon032c5e82015-12-21 11:26:06 -06005537 memset(crq->msgs, 0, PAGE_SIZE);
5538 crq->cur = 0;
Thomas Falcon51536982018-05-23 13:37:56 -05005539 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005540
5541 /* And re-open it again */
5542 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5543 crq->msg_token, PAGE_SIZE);
5544
5545 if (rc == H_CLOSED)
5546 /* Adapter is good, but other end is not ready */
5547 dev_warn(dev, "Partner adapter not ready\n");
5548 else if (rc != 0)
5549 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5550
5551 return rc;
5552}
5553
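/* Tear down the CRQ: release the IRQ and tasklet, free the queue with the
 * hypervisor, and unmap and free the backing page.
 */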
Nathan Fontenotf9928872017-03-30 02:48:54 -04005554static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005555{
5556 struct ibmvnic_crq_queue *crq = &adapter->crq;
5557 struct vio_dev *vdev = adapter->vdev;
5558 long rc;
5559
Nathan Fontenotf9928872017-03-30 02:48:54 -04005560 if (!crq->msgs)
5561 return;
5562
Thomas Falcon032c5e82015-12-21 11:26:06 -06005563 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5564 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005565 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005566 do {
5567 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5568 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5569
5570 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5571 DMA_BIDIRECTIONAL);
5572 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005573 crq->msgs = NULL;
Thomas Falcon51536982018-05-23 13:37:56 -05005574 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005575}
5576
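/* Allocate a one-page CRQ, register it with the hypervisor, and hook up the
 * interrupt.  H_RESOURCE from H_REG_CRQ is treated as a leftover
 * registration (e.g. after kexec) and answered with a CRQ reset.
 */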
Nathan Fontenotf9928872017-03-30 02:48:54 -04005577static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005578{
5579 struct ibmvnic_crq_queue *crq = &adapter->crq;
5580 struct device *dev = &adapter->vdev->dev;
5581 struct vio_dev *vdev = adapter->vdev;
5582 int rc, retrc = -ENOMEM;
5583
Nathan Fontenotf9928872017-03-30 02:48:54 -04005584 if (crq->msgs)
5585 return 0;
5586
Thomas Falcon032c5e82015-12-21 11:26:06 -06005587 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5588 /* Should we allocate more than one page? */
5589
5590 if (!crq->msgs)
5591 return -ENOMEM;
5592
5593 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5594 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5595 DMA_BIDIRECTIONAL);
5596 if (dma_mapping_error(dev, crq->msg_token))
5597 goto map_failed;
5598
5599 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5600 crq->msg_token, PAGE_SIZE);
5601
5602 if (rc == H_RESOURCE)
 5603		/* Maybe we are kexecing and the resource is still busy; try a reset */
5604 rc = ibmvnic_reset_crq(adapter);
5605 retrc = rc;
5606
5607 if (rc == H_CLOSED) {
5608 dev_warn(dev, "Partner adapter not ready\n");
5609 } else if (rc) {
5610 dev_warn(dev, "Error %d opening adapter\n", rc);
5611 goto reg_crq_failed;
5612 }
5613
5614 retrc = 0;
5615
Allen Paisaa7c3fe2020-09-14 12:59:29 +05305616	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005617
Thomas Falcon032c5e82015-12-21 11:26:06 -06005618 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03005619 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5620 adapter->vdev->unit_address);
5621 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005622 if (rc) {
5623 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5624 vdev->irq, rc);
5625 goto req_irq_failed;
5626 }
5627
5628 rc = vio_enable_interrupts(vdev);
5629 if (rc) {
5630 dev_err(dev, "Error %d enabling interrupts\n", rc);
5631 goto req_irq_failed;
5632 }
5633
5634 crq->cur = 0;
5635 spin_lock_init(&crq->lock);
5636
Sukadev Bhattiprolu6e20d002021-10-29 15:03:15 -07005637 /* process any CRQs that were queued before we enabled interrupts */
5638 tasklet_schedule(&adapter->tasklet);
5639
Thomas Falcon032c5e82015-12-21 11:26:06 -06005640 return retrc;
5641
5642req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06005643 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005644 do {
5645 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5646 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5647reg_crq_failed:
5648 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5649map_failed:
5650 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005651 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005652 return retrc;
5653}
5654
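/* Drive the CRQ initialization handshake and wait for init_done, then build
 * (or reset) the sub-CRQs and their interrupts.  With @reset set, existing
 * sub-CRQs are reused when the requested queue counts have not changed.
 */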
Lijun Pan635e4422020-08-19 17:52:26 -05005655static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
John Allenf6ef6402017-03-17 17:13:42 -05005656{
5657 struct device *dev = &adapter->vdev->dev;
Dany Madden98c41f02020-11-25 18:04:32 -06005658 unsigned long timeout = msecs_to_jiffies(20000);
Michal Suchanek6881b072021-03-02 20:47:47 +01005659 u64 old_num_rx_queues = adapter->req_rx_queues;
5660 u64 old_num_tx_queues = adapter->req_tx_queues;
John Allenf6ef6402017-03-17 17:13:42 -05005661 int rc;
5662
John Allen017892c12017-05-26 10:30:19 -04005663 adapter->from_passive_init = false;
5664
Michal Suchanek6881b072021-03-02 20:47:47 +01005665 if (reset)
Lijun Pan635e4422020-08-19 17:52:26 -05005666 reinit_completion(&adapter->init_done);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005667
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005668 adapter->init_done_rc = 0;
Lijun Panfa68bfa2020-08-19 17:52:24 -05005669 rc = ibmvnic_send_crq_init(adapter);
5670 if (rc) {
5671 dev_err(dev, "Send crq init failed with error %d\n", rc);
5672 return rc;
5673 }
5674
John Allenf6ef6402017-03-17 17:13:42 -05005675 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5676 dev_err(dev, "Initialization sequence timed out\n");
Dany Maddenb6ee5662021-12-14 00:17:47 -05005677 return -ETIMEDOUT;
John Allen017892c12017-05-26 10:30:19 -04005678 }
5679
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005680 if (adapter->init_done_rc) {
5681 release_crq_queue(adapter);
5682 return adapter->init_done_rc;
5683 }
5684
Lijun Pan785a2b12020-09-17 21:12:46 -05005685 if (adapter->from_passive_init) {
5686 adapter->state = VNIC_OPEN;
5687 adapter->from_passive_init = false;
Dany Maddenb6ee5662021-12-14 00:17:47 -05005688 return -EINVAL;
Lijun Pan785a2b12020-09-17 21:12:46 -05005689 }
5690
Lijun Pan635e4422020-08-19 17:52:26 -05005691 if (reset &&
5692 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05005693 adapter->reset_reason != VNIC_RESET_MOBILITY) {
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005694 if (adapter->req_rx_queues != old_num_rx_queues ||
5695 adapter->req_tx_queues != old_num_tx_queues) {
5696 release_sub_crqs(adapter, 0);
5697 rc = init_sub_crqs(adapter);
5698 } else {
5699 rc = reset_sub_crq_queues(adapter);
5700 }
5701 } else {
Nathan Fontenot57a49432017-05-26 10:31:12 -04005702 rc = init_sub_crqs(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005703 }
5704
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005705 if (rc) {
5706 dev_err(dev, "Initialization of sub crqs failed\n");
5707 release_crq_queue(adapter);
Thomas Falcon5df969c2017-06-28 19:55:54 -05005708 return rc;
5709 }
5710
5711 rc = init_sub_crq_irqs(adapter);
5712 if (rc) {
5713 dev_err(dev, "Failed to initialize sub crq irqs\n");
5714 release_crq_queue(adapter);
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005715 }
5716
5717 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05005718}
5719
Thomas Falcon40c9db82017-06-12 12:35:04 -05005720static struct device_attribute dev_attr_failover;
5721
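/* Probe: allocate the netdev and adapter, bring up the CRQ, perform the
 * initial capability exchange (tolerating a partner that is not yet ready),
 * and register the net device and the "failover" sysfs attribute.
 */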
Thomas Falcon032c5e82015-12-21 11:26:06 -06005722static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5723{
5724 struct ibmvnic_adapter *adapter;
5725 struct net_device *netdev;
5726 unsigned char *mac_addr_p;
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06005727 bool init_success;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005728 int rc;
5729
5730 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5731 dev->unit_address);
5732
5733 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5734 VETH_MAC_ADDR, NULL);
5735 if (!mac_addr_p) {
5736 dev_err(&dev->dev,
5737 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5738 __FILE__, __LINE__);
5739 return 0;
5740 }
5741
5742 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
Thomas Falcond45cc3a2017-12-18 12:52:11 -06005743 IBMVNIC_MAX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005744 if (!netdev)
5745 return -ENOMEM;
5746
5747 adapter = netdev_priv(netdev);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005748 adapter->state = VNIC_PROBING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005749 dev_set_drvdata(&dev->dev, netdev);
5750 adapter->vdev = dev;
5751 adapter->netdev = netdev;
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06005752 adapter->login_pending = false;
Sukadev Bhattiprolu129854f02021-09-14 20:52:56 -07005753 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
5754 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
5755 bitmap_set(adapter->map_ids, 0, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005756
5757 ether_addr_copy(adapter->mac_addr, mac_addr_p);
Jakub Kicinskif3956eb2021-10-01 14:32:23 -07005758 eth_hw_addr_set(netdev, adapter->mac_addr);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005759 netdev->irq = dev->irq;
5760 netdev->netdev_ops = &ibmvnic_netdev_ops;
5761 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5762 SET_NETDEV_DEV(netdev, &dev->dev);
5763
Nathan Fontenoted651a12017-05-03 14:04:38 -04005764 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005765 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5766 __ibmvnic_delayed_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005767 INIT_LIST_HEAD(&adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06005768 spin_lock_init(&adapter->rwi_lock);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005769 spin_lock_init(&adapter->state_lock);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005770 mutex_init(&adapter->fw_lock);
Thomas Falconbbd669a2019-04-04 18:58:26 -05005771 init_completion(&adapter->init_done);
Thomas Falcon070eca92019-11-25 17:12:53 -06005772 init_completion(&adapter->fw_done);
5773 init_completion(&adapter->reset_done);
5774 init_completion(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005775 clear_bit(0, &adapter->resetting);
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07005776 adapter->prev_rx_buf_sz = 0;
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07005777 adapter->prev_mtu = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04005778
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06005779 init_success = false;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005780 do {
Nathan Fontenot30f79622018-04-06 18:37:06 -05005781 rc = init_crq_queue(adapter);
5782 if (rc) {
5783 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5784 rc);
5785 goto ibmvnic_init_fail;
5786 }
5787
Lijun Pan635e4422020-08-19 17:52:26 -05005788 rc = ibmvnic_reset_init(adapter, false);
Sukadev Bhattiprolu6b278c02021-10-29 15:03:16 -07005789 } while (rc == -EAGAIN);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005790
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06005791 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
5792 * partner is not ready. CRQ is not active. When the partner becomes
5793 * ready, we will do the passive init reset.
5794 */
5795
5796 if (!rc)
5797 init_success = true;
5798
Thomas Falcon07184212018-05-16 15:49:05 -05005799 rc = init_stats_buffers(adapter);
5800 if (rc)
5801 goto ibmvnic_init_fail;
5802
5803 rc = init_stats_token(adapter);
5804 if (rc)
5805 goto ibmvnic_stats_fail;
5806
Thomas Falcon40c9db82017-06-12 12:35:04 -05005807 rc = device_create_file(&dev->dev, &dev_attr_failover);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005808 if (rc)
Thomas Falcon07184212018-05-16 15:49:05 -05005809 goto ibmvnic_dev_file_err;
Thomas Falcon40c9db82017-06-12 12:35:04 -05005810
Mick Tarsele876a8a2017-09-28 13:53:18 -07005811 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005812 rc = register_netdev(netdev);
5813 if (rc) {
5814 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005815 goto ibmvnic_register_fail;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005816 }
5817 dev_info(&dev->dev, "ibmvnic registered\n");
5818
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06005819 if (init_success) {
5820 adapter->state = VNIC_PROBED;
5821 netdev->mtu = adapter->req_mtu - ETH_HLEN;
5822 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5823 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5824 } else {
5825 adapter->state = VNIC_DOWN;
5826 }
John Allenc26eba02017-10-26 16:23:25 -05005827
5828 adapter->wait_for_reset = false;
Dany Maddena86d5c62020-11-25 18:04:31 -06005829 adapter->last_reset_time = jiffies;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005830 return 0;
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005831
5832ibmvnic_register_fail:
5833 device_remove_file(&dev->dev, &dev_attr_failover);
5834
Thomas Falcon07184212018-05-16 15:49:05 -05005835ibmvnic_dev_file_err:
5836 release_stats_token(adapter);
5837
5838ibmvnic_stats_fail:
5839 release_stats_buffers(adapter);
5840
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005841ibmvnic_init_fail:
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005842 release_sub_crqs(adapter, 1);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005843 release_crq_queue(adapter);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005844 mutex_destroy(&adapter->fw_lock);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005845 free_netdev(netdev);
5846
5847 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005848}
5849
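/* Remove: mark the adapter VNIC_REMOVING so no new resets are scheduled,
 * flush any reset work already in flight, then unregister the netdev and
 * release all CRQ, sub-CRQ and statistics resources.
 */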
Uwe Kleine-König386a9662021-02-25 23:18:34 +01005850static void ibmvnic_remove(struct vio_dev *dev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005851{
5852 struct net_device *netdev = dev_get_drvdata(&dev->dev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005853 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005854 unsigned long flags;
5855
5856 spin_lock_irqsave(&adapter->state_lock, flags);
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08005857
5858 /* If ibmvnic_reset() is scheduling a reset, wait for it to
5859 * finish. Then, set the state to REMOVING to prevent it from
5860 * scheduling any more work and to have reset functions ignore
5861 * any resets that have already been scheduled. Drop the lock
5862 * after setting state, so __ibmvnic_reset() which is called
5863 * from the flush_work() below, can make progress.
5864 */
Junlin Yang69cdb792021-03-05 16:48:39 +08005865 spin_lock(&adapter->rwi_lock);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005866 adapter->state = VNIC_REMOVING;
Junlin Yang69cdb792021-03-05 16:48:39 +08005867 spin_unlock(&adapter->rwi_lock);
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08005868
Juliet Kim7d7195a2020-03-10 09:23:58 -05005869 spin_unlock_irqrestore(&adapter->state_lock, flags);
5870
Thomas Falcon6954a9e2020-06-12 13:34:41 -05005871 flush_work(&adapter->ibmvnic_reset);
5872 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5873
Juliet Kima5681e22018-11-19 15:59:22 -06005874 rtnl_lock();
5875 unregister_netdevice(netdev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005876
5877 release_resources(adapter);
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07005878 release_rx_pools(adapter);
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07005879 release_tx_pools(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005880 release_sub_crqs(adapter, 1);
Nathan Fontenot37489052017-04-19 13:45:04 -04005881 release_crq_queue(adapter);
5882
Thomas Falcon53cc7722018-02-26 18:10:56 -06005883 release_stats_token(adapter);
5884 release_stats_buffers(adapter);
5885
Nathan Fontenot90c80142017-05-03 14:04:32 -04005886 adapter->state = VNIC_REMOVED;
5887
Juliet Kima5681e22018-11-19 15:59:22 -06005888 rtnl_unlock();
Thomas Falconff25dcb2019-11-25 17:12:56 -06005889 mutex_destroy(&adapter->fw_lock);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005890 device_remove_file(&dev->dev, &dev_attr_failover);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005891 free_netdev(netdev);
5892 dev_set_drvdata(&dev->dev, NULL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005893}
5894
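/* sysfs "failover" attribute: writing 1 asks the hypervisor (H_VIOCTL with
 * H_SESSION_ERR_DETECTED) to fail the session over, then schedules a
 * FAILOVER reset as the last resort.
 */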
Thomas Falcon40c9db82017-06-12 12:35:04 -05005895static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5896 const char *buf, size_t count)
5897{
5898 struct net_device *netdev = dev_get_drvdata(dev);
5899 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5900 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5901 __be64 session_token;
5902 long rc;
5903
5904 if (!sysfs_streq(buf, "1"))
5905 return -EINVAL;
5906
5907 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5908 H_GET_SESSION_TOKEN, 0, 0, 0);
5909 if (rc) {
5910 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5911 rc);
Lijun Pan334c4242021-04-13 03:31:44 -05005912 goto last_resort;
Thomas Falcon40c9db82017-06-12 12:35:04 -05005913 }
5914
5915 session_token = (__be64)retbuf[0];
5916 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5917 be64_to_cpu(session_token));
5918 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5919 H_SESSION_ERR_DETECTED, session_token, 0, 0);
Lijun Pan334c4242021-04-13 03:31:44 -05005920 if (rc)
5921 netdev_err(netdev,
5922 "H_VIOCTL initiated failover failed, rc %ld\n",
Thomas Falcon40c9db82017-06-12 12:35:04 -05005923 rc);
Lijun Pan334c4242021-04-13 03:31:44 -05005924
5925last_resort:
5926 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
5927 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005928
5929 return count;
5930}
Joe Perches6cbaefb2017-12-19 10:15:09 -08005931static DEVICE_ATTR_WO(failover);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005932
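/* Report an estimate of the IOMMU/DMA space the device will need: the CRQ
 * page, the statistics buffer, the sub-CRQ message queues and the long term
 * mapped RX pool buffers.
 */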
Thomas Falcon032c5e82015-12-21 11:26:06 -06005933static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5934{
5935 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5936 struct ibmvnic_adapter *adapter;
5937 struct iommu_table *tbl;
5938 unsigned long ret = 0;
5939 int i;
5940
5941 tbl = get_iommu_table_base(&vdev->dev);
5942
 5943	/* netdev inits at probe time along with the structures we need below */
5944 if (!netdev)
5945 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5946
5947 adapter = netdev_priv(netdev);
5948
5949 ret += PAGE_SIZE; /* the crq message queue */
Thomas Falcon032c5e82015-12-21 11:26:06 -06005950 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5951
5952 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5953 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5954
Thomas Falcon507ebe62020-08-21 13:39:01 -05005955 for (i = 0; i < adapter->num_active_rx_pools; i++)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005956 ret += adapter->rx_pool[i].size *
5957 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5958
5959 return ret;
5960}
5961
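/* PM resume callback: if the device was open, kick the tasklet to process
 * any CRQ messages that arrived while suspended.
 */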
5962static int ibmvnic_resume(struct device *dev)
5963{
5964 struct net_device *netdev = dev_get_drvdata(dev);
5965 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005966
John Allencb89ba22017-06-19 11:27:53 -05005967 if (adapter->state != VNIC_OPEN)
5968 return 0;
5969
John Allena2488782017-07-24 13:26:06 -05005970 tasklet_schedule(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005971
5972 return 0;
5973}
5974
Arvind Yadav8c37bc62017-08-17 18:52:54 +05305975static const struct vio_device_id ibmvnic_device_table[] = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06005976 {"network", "IBM,vnic"},
5977 {"", "" }
5978};
5979MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5980
5981static const struct dev_pm_ops ibmvnic_pm_ops = {
5982 .resume = ibmvnic_resume
5983};
5984
5985static struct vio_driver ibmvnic_driver = {
5986 .id_table = ibmvnic_device_table,
5987 .probe = ibmvnic_probe,
5988 .remove = ibmvnic_remove,
5989 .get_desired_dma = ibmvnic_get_desired_dma,
5990 .name = ibmvnic_driver_name,
5991 .pm = &ibmvnic_pm_ops,
5992};
5993
5994/* module functions */
5995static int __init ibmvnic_module_init(void)
5996{
5997 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5998 IBMVNIC_DRIVER_VERSION);
5999
6000 return vio_register_driver(&ibmvnic_driver);
6001}
6002
6003static void __exit ibmvnic_module_exit(void)
6004{
6005 vio_unregister_driver(&ibmvnic_driver);
6006}
6007
6008module_init(ibmvnic_module_init);
6009module_exit(ibmvnic_module_exit);