// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the       */
/* server on driver initialization. Sub CRQs (sCRQs) are similar to      */
/* CRQs, but are used by the driver to notify the server that a packet   */
/* is ready for transmission or that a buffer has been added to receive  */
/* a packet. Subsequently, sCRQs are used by the server to notify the    */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

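/**
 * send_crq_init_complete() - Tell the VNIC server that CRQ init is complete
 * @adapter: ibmvnic adapter
 *
 * Return: the result of ibmvnic_send_crq(), 0 on success.
 */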
static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

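/**
 * send_version_xchg() - Send the driver's protocol version to the VNIC server
 * @adapter: ibmvnic adapter
 *
 * Return: the result of ibmvnic_send_crq(), 0 on success.
 */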
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

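/**
 * h_reg_sub_crq() - Register a sub CRQ with the hypervisor
 * @unit_address: unit address of the vio device
 * @token: DMA address of the queue page
 * @length: length of the queue in bytes
 * @number: on success, the number assigned to the new sub CRQ
 * @irq: on success, the irq assigned to the new sub CRQ
 *
 * Return: the H_REG_SUB_CRQ hcall return code.
 */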
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb: The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 * @size: size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or under low memory situations, can cause resets to fail/timeout and the
 * LPAR to lose connectivity. So hold onto the LTB even if we fail to
 * communicate with the VIOS and reuse it on next open. Free LTB when
 * adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *	   a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -1;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

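/**
 * free_long_term_buff() - Free a long term buffer (LTB)
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb: container object for the LTB
 *
 * Send an unmap request to the VIOS (unless the reset in progress means
 * the remote end has already unmapped it), free the DMA memory and
 * release the map id.
 */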
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

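/**
 * replenish_rx_pool() - Refill an rx pool and notify the VNIC server
 * @adapter: ibmvnic adapter
 * @pool: the rx pool to replenish
 *
 * Allocate skbs where needed, copy them into the pool's long term buffer
 * and queue rx_add descriptors to the VIOS in batches via
 * send_subcrq_indirect(). On hypervisor failure, return the queued buffers
 * to the free map and, if the queue is closed or a failover is pending,
 * deactivate the pools until the ensuing reset.
 */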
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		index = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing the skb.
		 */
		skb = pool->rx_buff[index].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[index].data = dst;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

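/**
 * release_stats_buffers() - Free the per-queue statistics buffers
 * @adapter: ibmvnic adapter
 */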
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

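/**
 * init_stats_token() - DMA map the statistics buffer for the VNIC server
 * @adapter: ibmvnic adapter
 *
 * Return: 0 on success, -1 if the buffer could not be mapped.
 */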
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}

/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	/* Require buff size to be exactly same for now */
	if (old_buff_size != new_buff_size)
		return false;

	if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
		return true;

	if (old_num_pools < adapter->min_rx_queues ||
	    old_num_pools > adapter->max_rx_queues ||
	    old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
	    old_pool_size > adapter->max_rx_add_entries_per_subcrq)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size))
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pools() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return -1;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

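/**
 * release_one_tx_pool() - Release a single tx pool and its long term buffer
 * @adapter: ibmvnic adapter
 * @tx_pool: the tx pool to release
 */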
static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

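/**
 * init_one_tx_pool() - Initialize the buffers and free map of one tx pool
 * @netdev: net device associated with the vnic interface
 * @tx_pool: the tx pool to initialize
 * @pool_size: number of buffers in the pool
 * @buf_size: size of each buffer
 *
 * Return: 0 on success, -1 on allocation failure.
 */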
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -1;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	/* Require MTU to be exactly same to reuse pools for now */
	if (old_mtu != new_mtu)
		return false;

	if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
		return true;

	if (old_num_pools < adapter->min_tx_queues ||
	    old_num_pools > adapter->max_tx_queues ||
	    old_pool_size < adapter->min_tx_entries_per_subcrq ||
	    old_pool_size > adapter->max_tx_entries_per_subcrq)
		return false;

	return true;
}

/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;		/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -1;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 * buffers (see calls to init_one_tx_pool() for these).
	 * For consistency, we use tx_pool->num_buffers and
	 * tso_pool->num_buffers below.
	 */
	rc = -1;
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;
		u32 ltb_size;

		tx_pool = &adapter->tx_pool[i];
		ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
			i, tx_pool->long_term_buff.buff,
			tx_pool->num_buffers, tx_pool->buf_size);

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];
		ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
			i, tso_pool->long_term_buff.buff,
			tso_pool->num_buffers, tso_pool->buf_size);

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

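/**
 * ibmvnic_napi_enable() - Enable NAPI polling on all requested rx queues
 * @adapter: ibmvnic adapter
 *
 * No-op if NAPI is already enabled.
 */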
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

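/**
 * init_napi() - Allocate and register a NAPI context per requested rx queue
 * @adapter: ibmvnic adapter
 *
 * Return: 0 on success, -ENOMEM if the napi array cannot be allocated.
 */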
static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

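/**
 * ibmvnic_login() - Log in to the VNIC server
 * @netdev: net device associated with the vnic interface
 *
 * Send a login CRQ and wait for the response, retrying on timeout, abort
 * or partial success. On partial success the sub CRQs are released and
 * re-negotiated with the server before retrying. Gives up after 10
 * retries.
 *
 * Return: 0 on success and negative value on error.
 */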
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

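/**
 * set_link_state() - Set the logical link state of the adapter
 * @adapter: ibmvnic adapter
 * @link_state: the requested link state
 *
 * Send a LOGICAL_LINK_STATE CRQ and wait for the response, re-sending
 * after a delay as long as the server reports partial success.
 *
 * Return: 0 on success and negative value on error.
 */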
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

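/**
 * ibmvnic_get_vpd() - Retrieve Vital Product Data from the VNIC server
 * @adapter: ibmvnic adapter
 *
 * Query the VPD size, (re)allocate and DMA map a buffer of that size,
 * then ask the server to fill it in.
 *
 * Return: 0 on success and negative value on error.
 */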
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001288static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1289{
1290 struct device *dev = &adapter->vdev->dev;
1291 union ibmvnic_crq crq;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001292 int len = 0;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001293 int rc;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001294
1295 if (adapter->vpd->buff)
1296 len = adapter->vpd->len;
1297
Thomas Falconff25dcb2019-11-25 17:12:56 -06001298 mutex_lock(&adapter->fw_lock);
1299 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06001300 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001301
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001302 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1303 crq.get_vpd_size.cmd = GET_VPD_SIZE;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001304 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001305 if (rc) {
1306 mutex_unlock(&adapter->fw_lock);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001307 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001308 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06001309
1310 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1311 if (rc) {
1312 dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001313 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06001314 return rc;
1315 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06001316 mutex_unlock(&adapter->fw_lock);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001317
1318 if (!adapter->vpd->len)
1319 return -ENODATA;
1320
1321 if (!adapter->vpd->buff)
1322 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1323 else if (adapter->vpd->len != len)
1324 adapter->vpd->buff =
1325 krealloc(adapter->vpd->buff,
1326 adapter->vpd->len, GFP_KERNEL);
1327
1328 if (!adapter->vpd->buff) {
 1329 dev_err(dev, "Could not allocate VPD buffer\n");
1330 return -ENOMEM;
1331 }
1332
1333 adapter->vpd->dma_addr =
1334 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1335 DMA_FROM_DEVICE);
Desnes Augusto Nunes do Rosariof7431062017-11-17 09:09:04 -02001336 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001337 dev_err(dev, "Could not map VPD buffer\n");
1338 kfree(adapter->vpd->buff);
Thomas Falconb0992ec2018-02-06 17:25:23 -06001339 adapter->vpd->buff = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001340 return -ENOMEM;
1341 }
1342
Thomas Falconff25dcb2019-11-25 17:12:56 -06001343 mutex_lock(&adapter->fw_lock);
1344 adapter->fw_done_rc = 0;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001345 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001346
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001347 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1348 crq.get_vpd.cmd = GET_VPD;
1349 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1350 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001351 rc = ibmvnic_send_crq(adapter, &crq);
1352 if (rc) {
1353 kfree(adapter->vpd->buff);
1354 adapter->vpd->buff = NULL;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001355 mutex_unlock(&adapter->fw_lock);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001356 return rc;
1357 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06001358
1359 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1360 if (rc) {
1361 dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1362 kfree(adapter->vpd->buff);
1363 adapter->vpd->buff = NULL;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001364 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06001365 return rc;
1366 }
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001367
Thomas Falconff25dcb2019-11-25 17:12:56 -06001368 mutex_unlock(&adapter->fw_lock);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001369 return 0;
1370}
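
/* Illustrative sketch (not part of the driver): the fw_lock/fw_done
 * pattern used twice in ibmvnic_get_vpd() above. fw_lock serializes
 * commands that share the single fw_done completion, and fw_done is
 * re-armed before the send so an earlier command's completion cannot
 * satisfy this wait. The crq setup is elided; the 10000 ms timeout
 * mirrors the calls above.
 */
#if 0
	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (!rc)
		rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done,
						 10000);
	mutex_unlock(&adapter->fw_lock);
#endif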
1371
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001372static int init_resources(struct ibmvnic_adapter *adapter)
John Allena57a5d22017-03-17 17:13:41 -05001373{
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001374 struct net_device *netdev = adapter->netdev;
Nathan Fontenot86f669b2018-02-19 13:30:39 -06001375 int rc;
John Allena57a5d22017-03-17 17:13:41 -05001376
Thomas Falcon7f3c6e62017-04-21 15:38:40 -04001377 rc = set_real_num_queues(netdev);
1378 if (rc)
1379 return rc;
John Allenbd0b6722017-03-17 17:13:40 -05001380
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001381 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1382 if (!adapter->vpd)
1383 return -ENOMEM;
1384
John Allen69d08dc2018-01-18 16:27:58 -06001385 /* Vital Product Data (VPD) */
1386 rc = ibmvnic_get_vpd(adapter);
1387 if (rc) {
1388 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1389 return rc;
1390 }
1391
Nathan Fontenot86f669b2018-02-19 13:30:39 -06001392 rc = init_napi(adapter);
1393 if (rc)
1394 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001395
Lijun Pan69980d02020-09-27 20:13:28 -05001396 send_query_map(adapter);
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -04001397
1398 rc = init_rx_pools(netdev);
1399 if (rc)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001400 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001401
Nathan Fontenotc657e322017-03-30 02:49:06 -04001402 rc = init_tx_pools(netdev);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001403 return rc;
1404}
1405
Nathan Fontenoted651a12017-05-03 14:04:38 -04001406static int __ibmvnic_open(struct net_device *netdev)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001407{
1408 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001409 enum vnic_state prev_state = adapter->state;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001410 int i, rc;
1411
Nathan Fontenot90c80142017-05-03 14:04:32 -04001412 adapter->state = VNIC_OPENING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001413 replenish_pools(adapter);
John Allend944c3d62017-05-26 10:30:13 -04001414 ibmvnic_napi_enable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001415
Thomas Falcon032c5e82015-12-21 11:26:06 -06001416 /* We're ready to receive frames; enable the sub-crq interrupts and
1417 * set the logical link state to up
1418 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001419 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001420 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001421 if (prev_state == VNIC_CLOSED)
1422 enable_irq(adapter->rx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001423 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001424 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001425
Nathan Fontenoted651a12017-05-03 14:04:38 -04001426 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001427 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001428 if (prev_state == VNIC_CLOSED)
1429 enable_irq(adapter->tx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001430 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
Thomas Falcon0d973382020-11-18 19:12:19 -06001431 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
Nathan Fontenoted651a12017-05-03 14:04:38 -04001432 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001433
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001434 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001435 if (rc) {
Lijun Pan0775ebc2021-04-14 02:46:14 -05001436 ibmvnic_napi_disable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001437 release_resources(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001438 return rc;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001439 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001440
Nathan Fontenoted651a12017-05-03 14:04:38 -04001441 netif_tx_start_all_queues(netdev);
1442
Dany Madden2ca220f2021-06-23 21:13:11 -07001443 if (prev_state == VNIC_CLOSED) {
1444 for (i = 0; i < adapter->req_rx_queues; i++)
1445 napi_schedule(&adapter->napi[i]);
1446 }
1447
Nathan Fontenoted651a12017-05-03 14:04:38 -04001448 adapter->state = VNIC_OPEN;
1449 return rc;
1450}
1451
1452static int ibmvnic_open(struct net_device *netdev)
1453{
1454 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
John Allen69d08dc2018-01-18 16:27:58 -06001455 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001456
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001457 ASSERT_RTNL();
1458
1459 /* If device failover is pending or we are about to reset, just set
1460 * device state and return. Device operation will be handled by reset
1461 * routine.
1462 *
1463 * It should be safe to overwrite the adapter->state here. Since
1464 * we hold the rtnl, either the reset has not actually started or
1465 * the rtnl got dropped during the set_link_state() in do_reset().
1466 * In the former case, no one else is changing the state (again we
1467 * have the rtnl) and in the latter case, do_reset() will detect and
1468 * honor our setting below.
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001469 */
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001470 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
Lijun Pan0666ef72021-04-12 02:41:28 -05001471 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1472 adapter_state_to_string(adapter->state),
1473 adapter->failover_pending);
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001474 adapter->state = VNIC_OPEN;
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001475 rc = 0;
1476 goto out;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001477 }
1478
Nathan Fontenoted651a12017-05-03 14:04:38 -04001479 if (adapter->state != VNIC_CLOSED) {
1480 rc = ibmvnic_login(netdev);
Juliet Kima5681e22018-11-19 15:59:22 -06001481 if (rc)
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001482 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001483
1484 rc = init_resources(adapter);
1485 if (rc) {
1486 netdev_err(netdev, "failed to initialize resources\n");
1487 release_resources(adapter);
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07001488 release_rx_pools(adapter);
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07001489 release_tx_pools(adapter);
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001490 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001491 }
1492 }
1493
1494 rc = __ibmvnic_open(netdev);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001495
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001496out:
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001497 /* If open failed and there is a pending failover or in-progress reset,
1498 * set device state and return. Device operation will be handled by
1499 * reset routine. See also comments above regarding rtnl.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001500 */
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001501 if (rc &&
1502 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001503 adapter->state = VNIC_OPEN;
1504 rc = 0;
1505 }
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001506 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001507}
1508
Thomas Falcond0869c02018-02-13 18:23:43 -06001509static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1510{
1511 struct ibmvnic_rx_pool *rx_pool;
Thomas Falcon637f81d2018-02-26 18:10:57 -06001512 struct ibmvnic_rx_buff *rx_buff;
Thomas Falcond0869c02018-02-13 18:23:43 -06001513 u64 rx_entries;
1514 int rx_scrqs;
1515 int i, j;
1516
1517 if (!adapter->rx_pool)
1518 return;
1519
Thomas Falcon660e3092018-04-20 14:25:32 -05001520 rx_scrqs = adapter->num_active_rx_pools;
Thomas Falcond0869c02018-02-13 18:23:43 -06001521 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1522
1523 /* Free any remaining skbs in the rx buffer pools */
1524 for (i = 0; i < rx_scrqs; i++) {
1525 rx_pool = &adapter->rx_pool[i];
Thomas Falcon637f81d2018-02-26 18:10:57 -06001526 if (!rx_pool || !rx_pool->rx_buff)
Thomas Falcond0869c02018-02-13 18:23:43 -06001527 continue;
1528
1529 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1530 for (j = 0; j < rx_entries; j++) {
Thomas Falcon637f81d2018-02-26 18:10:57 -06001531 rx_buff = &rx_pool->rx_buff[j];
1532 if (rx_buff && rx_buff->skb) {
1533 dev_kfree_skb_any(rx_buff->skb);
1534 rx_buff->skb = NULL;
Thomas Falcond0869c02018-02-13 18:23:43 -06001535 }
1536 }
1537 }
1538}
1539
Thomas Falcone9e1e972018-03-16 20:00:30 -05001540static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1541 struct ibmvnic_tx_pool *tx_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001542{
Thomas Falcon637f81d2018-02-26 18:10:57 -06001543 struct ibmvnic_tx_buff *tx_buff;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001544 u64 tx_entries;
Thomas Falcone9e1e972018-03-16 20:00:30 -05001545 int i;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001546
Dan Carpenter050e85c2018-03-23 14:36:15 +03001547 if (!tx_pool || !tx_pool->tx_buff)
Thomas Falcone9e1e972018-03-16 20:00:30 -05001548 return;
1549
1550 tx_entries = tx_pool->num_buffers;
1551
1552 for (i = 0; i < tx_entries; i++) {
1553 tx_buff = &tx_pool->tx_buff[i];
1554 if (tx_buff && tx_buff->skb) {
1555 dev_kfree_skb_any(tx_buff->skb);
1556 tx_buff->skb = NULL;
1557 }
1558 }
1559}
1560
1561static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1562{
1563 int tx_scrqs;
1564 int i;
1565
1566 if (!adapter->tx_pool || !adapter->tso_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001567 return;
1568
Thomas Falcon660e3092018-04-20 14:25:32 -05001569 tx_scrqs = adapter->num_active_tx_pools;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001570
1571 /* Free any remaining skbs in the tx buffer pools */
1572 for (i = 0; i < tx_scrqs; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001573 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
Thomas Falcone9e1e972018-03-16 20:00:30 -05001574 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1575 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001576 }
1577}
1578
John Allen6095e592018-03-30 13:44:21 -05001579static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
John Allenea5509f2017-03-17 17:13:43 -05001580{
John Allen6095e592018-03-30 13:44:21 -05001581 struct net_device *netdev = adapter->netdev;
John Allenea5509f2017-03-17 17:13:43 -05001582 int i;
1583
Nathan Fontenot46293b92017-05-03 14:05:02 -04001584 if (adapter->tx_scrq) {
1585 for (i = 0; i < adapter->req_tx_queues; i++)
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001586 if (adapter->tx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001587 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001588 "Disabling tx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001589 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001590 disable_irq(adapter->tx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001591 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001592 }
1593
Nathan Fontenot46293b92017-05-03 14:05:02 -04001594 if (adapter->rx_scrq) {
1595 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001596 if (adapter->rx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001597 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001598 "Disabling rx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001599 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001600 disable_irq(adapter->rx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001601 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001602 }
1603 }
John Allen6095e592018-03-30 13:44:21 -05001604}
1605
1606static void ibmvnic_cleanup(struct net_device *netdev)
1607{
1608 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1609
1610 /* ensure that transmissions are stopped if called by do_reset */
Juliet Kim7ed5b312019-09-20 16:11:23 -04001611 if (test_bit(0, &adapter->resetting))
John Allen6095e592018-03-30 13:44:21 -05001612 netif_tx_disable(netdev);
1613 else
1614 netif_tx_stop_all_queues(netdev);
1615
1616 ibmvnic_napi_disable(adapter);
1617 ibmvnic_disable_irqs(adapter);
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001618}
1619
1620static int __ibmvnic_close(struct net_device *netdev)
1621{
1622 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1623 int rc = 0;
1624
1625 adapter->state = VNIC_CLOSING;
1626 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
Nathan Fontenot90c80142017-05-03 14:04:32 -04001627 adapter->state = VNIC_CLOSED;
Sukadev Bhattiprolud4083d32021-02-10 17:41:43 -08001628 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001629}
1630
Nathan Fontenoted651a12017-05-03 14:04:38 -04001631static int ibmvnic_close(struct net_device *netdev)
1632{
1633 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1634 int rc;
1635
Lijun Pan0666ef72021-04-12 02:41:28 -05001636 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1637 adapter_state_to_string(adapter->state),
1638 adapter->failover_pending,
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08001639 adapter->force_reset_recovery);
1640
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001641 /* If device failover is pending, just set device state and return.
1642 * Device operation will be handled by reset routine.
1643 */
1644 if (adapter->failover_pending) {
1645 adapter->state = VNIC_CLOSED;
1646 return 0;
1647 }
1648
Nathan Fontenoted651a12017-05-03 14:04:38 -04001649 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001650 ibmvnic_cleanup(netdev);
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07001651 clean_rx_pools(adapter);
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07001652 clean_tx_pools(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001653
1654 return rc;
1655}
1656
Thomas Falconad7775d2016-04-01 17:20:34 -05001657/**
1658 * build_hdr_data - creates L2/L3/L4 header data buffer
Lee Jones80708602021-01-15 20:09:03 +00001659 * @hdr_field: bitfield determining needed headers
1660 * @skb: socket buffer
1661 * @hdr_len: array of header lengths
1662 * @hdr_data: buffer to write the header to
Thomas Falconad7775d2016-04-01 17:20:34 -05001663 *
1664 * Reads hdr_field to determine which headers are needed by firmware.
1665 * Builds a buffer containing these headers. Saves individual header
1666 * lengths and total buffer length to be used to build descriptors.
1667 */
1668static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1669 int *hdr_len, u8 *hdr_data)
1670{
1671 int len = 0;
1672 u8 *hdr;
1673
Thomas Falconda75e3b2018-03-12 11:51:02 -05001674 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1675 hdr_len[0] = sizeof(struct vlan_ethhdr);
1676 else
1677 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001678
1679 if (skb->protocol == htons(ETH_P_IP)) {
1680 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1681 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1682 hdr_len[2] = tcp_hdrlen(skb);
1683 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1684 hdr_len[2] = sizeof(struct udphdr);
1685 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1686 hdr_len[1] = sizeof(struct ipv6hdr);
1687 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1688 hdr_len[2] = tcp_hdrlen(skb);
1689 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1690 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001691 } else if (skb->protocol == htons(ETH_P_ARP)) {
1692 hdr_len[1] = arp_hdr_len(skb->dev);
1693 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001694 }
1695
1696 memset(hdr_data, 0, 120);
1697 if ((hdr_field >> 6) & 1) {
1698 hdr = skb_mac_header(skb);
1699 memcpy(hdr_data, hdr, hdr_len[0]);
1700 len += hdr_len[0];
1701 }
1702
1703 if ((hdr_field >> 5) & 1) {
1704 hdr = skb_network_header(skb);
1705 memcpy(hdr_data + len, hdr, hdr_len[1]);
1706 len += hdr_len[1];
1707 }
1708
1709 if ((hdr_field >> 4) & 1) {
1710 hdr = skb_transport_header(skb);
1711 memcpy(hdr_data + len, hdr, hdr_len[2]);
1712 len += hdr_len[2];
1713 }
1714 return len;
1715}
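
/* Worked example (illustrative, not part of the driver): for a TCP/IPv4
 * skb with all three header levels requested (hdr_field bits 6, 5 and 4
 * set), build_hdr_data() typically yields
 *	hdr_len[0] = 14	(struct ethhdr, no VLAN tag)
 *	hdr_len[1] = 20	(IPv4 header, ihl = 5)
 *	hdr_len[2] = 20	(TCP header, no options)
 * and returns 54, the number of bytes copied into hdr_data.
 */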
1716
1717/**
1718 * create_hdr_descs - create header and header extension descriptors
Lee Jones80708602021-01-15 20:09:03 +00001719 * @hdr_field: bitfield determining needed headers
1720 * @hdr_data: buffer containing header data
1721 * @len: length of data buffer
1722 * @hdr_len: array of individual header lengths
1723 * @scrq_arr: descriptor array
Thomas Falconad7775d2016-04-01 17:20:34 -05001724 *
1725 * Creates header and, if needed, header extension descriptors and
 1726 * places them in a descriptor array, scrq_arr.
1727 */
Thomas Falcon2de09682017-10-16 10:02:11 -05001729static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1730 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001731{
1732 union sub_crq hdr_desc;
1733 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001734 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001735 u8 *data, *cur;
1736 int tmp;
1737
1738 while (tmp_len > 0) {
1739 cur = hdr_data + len - tmp_len;
1740
1741 memset(&hdr_desc, 0, sizeof(hdr_desc));
1742 if (cur != hdr_data) {
1743 data = hdr_desc.hdr_ext.data;
1744 tmp = tmp_len > 29 ? 29 : tmp_len;
1745 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1746 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1747 hdr_desc.hdr_ext.len = tmp;
1748 } else {
1749 data = hdr_desc.hdr.data;
1750 tmp = tmp_len > 24 ? 24 : tmp_len;
1751 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1752 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1753 hdr_desc.hdr.len = tmp;
1754 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1755 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1756 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1757 hdr_desc.hdr.flag = hdr_field << 1;
1758 }
1759 memcpy(data, cur, tmp);
1760 tmp_len -= tmp;
1761 *scrq_arr = hdr_desc;
1762 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001763 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001764 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001765
1766 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001767}
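
/* Worked example (illustrative, not part of the driver): continuing the
 * 54-byte TCP/IPv4 case above, the first 24 bytes fit in one
 * IBMVNIC_HDR_DESC entry (which also carries the l2/l3/l4 lengths), and
 * the remaining 30 bytes take two IBMVNIC_HDR_EXT_DESC entries (29 + 1),
 * so create_hdr_descs() returns num_descs = 3.
 */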
1768
1769/**
1770 * build_hdr_descs_arr - build a header descriptor array
Lijun Pan73214a62021-06-11 10:43:39 -05001771 * @skb: tx socket buffer
1772 * @indir_arr: indirect array
Lee Jones80708602021-01-15 20:09:03 +00001773 * @num_entries: number of descriptors to be sent
1774 * @hdr_field: bit field determining which headers will be sent
Thomas Falconad7775d2016-04-01 17:20:34 -05001775 *
1776 * This function will build a TX descriptor array with applicable
1777 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1778 */
Thomas Falconc62aa372020-11-18 19:12:20 -06001780static void build_hdr_descs_arr(struct sk_buff *skb,
1781 union sub_crq *indir_arr,
Thomas Falconad7775d2016-04-01 17:20:34 -05001782 int *num_entries, u8 hdr_field)
1783{
1784 int hdr_len[3] = {0, 0, 0};
Thomas Falconc62aa372020-11-18 19:12:20 -06001785 u8 hdr_data[140] = {0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001786 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001787
Thomas Falconc62aa372020-11-18 19:12:20 -06001788 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1789 hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001790 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconc62aa372020-11-18 19:12:20 -06001791 indir_arr + 1);
Thomas Falconad7775d2016-04-01 17:20:34 -05001792}
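
/* Illustrative layout (not part of the driver): for the 3-descriptor
 * header case above, the indirect array handed to firmware ends up as
 *	indir_arr[0] = IBMVNIC_TX_DESC	    (filled in by ibmvnic_xmit())
 *	indir_arr[1] = IBMVNIC_HDR_DESC	    (24 header bytes + lengths)
 *	indir_arr[2] = IBMVNIC_HDR_EXT_DESC (29 header bytes)
 *	indir_arr[3] = IBMVNIC_HDR_EXT_DESC (1 header byte)
 * with *num_entries going from 1 to 4 at the call site in ibmvnic_xmit()
 * below, which is why the headers are written starting at indir_arr + 1.
 */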
1793
Thomas Falcon1f247a62018-03-12 11:51:04 -05001794static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1795 struct net_device *netdev)
1796{
1797 /* For some backing devices, mishandling of small packets
1798 * can result in a loss of connection or TX stall. Device
1799 * architects recommend that no packet should be smaller
1800 * than the minimum MTU value provided to the driver, so
1801 * pad any packets to that length
1802 */
1803 if (skb->len < netdev->min_mtu)
1804 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001805
1806 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001807}
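
/* Illustrative example (not part of the driver, hypothetical min_mtu):
 * had firmware reported a 256-byte minimum MTU, a 60-byte frame would be
 * zero-padded to 256 bytes here. skb_put_padto() frees the skb when it
 * cannot extend it, so on a non-zero return the caller must not touch
 * the skb again -- ibmvnic_xmit() below only counts the drop.
 */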
1808
Thomas Falcon0d973382020-11-18 19:12:19 -06001809static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1810 struct ibmvnic_sub_crq_queue *tx_scrq)
1811{
1812 struct ibmvnic_ind_xmit_queue *ind_bufp;
1813 struct ibmvnic_tx_buff *tx_buff;
1814 struct ibmvnic_tx_pool *tx_pool;
1815 union sub_crq tx_scrq_entry;
1816 int queue_num;
1817 int entries;
1818 int index;
1819 int i;
1820
1821 ind_bufp = &tx_scrq->ind_buf;
1822 entries = (u64)ind_bufp->index;
1823 queue_num = tx_scrq->pool_index;
1824
1825 for (i = entries - 1; i >= 0; --i) {
1826 tx_scrq_entry = ind_bufp->indir_arr[i];
1827 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1828 continue;
1829 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1830 if (index & IBMVNIC_TSO_POOL_MASK) {
1831 tx_pool = &adapter->tso_pool[queue_num];
1832 index &= ~IBMVNIC_TSO_POOL_MASK;
1833 } else {
1834 tx_pool = &adapter->tx_pool[queue_num];
1835 }
1836 tx_pool->free_map[tx_pool->consumer_index] = index;
1837 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1838 tx_pool->num_buffers - 1 :
1839 tx_pool->consumer_index - 1;
1840 tx_buff = &tx_pool->tx_buff[index];
1841 adapter->netdev->stats.tx_packets--;
1842 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1843 adapter->tx_stats_buffers[queue_num].packets--;
1844 adapter->tx_stats_buffers[queue_num].bytes -=
1845 tx_buff->skb->len;
1846 dev_kfree_skb_any(tx_buff->skb);
1847 tx_buff->skb = NULL;
1848 adapter->netdev->stats.tx_dropped++;
1849 }
1850 ind_bufp->index = 0;
1851 if (atomic_sub_return(entries, &tx_scrq->used) <=
1852 (adapter->req_tx_entries_per_subcrq / 2) &&
Sukadev Bhattiprolu65d64702021-06-23 21:13:12 -07001853 __netif_subqueue_stopped(adapter->netdev, queue_num) &&
1854 !test_bit(0, &adapter->resetting)) {
Thomas Falcon0d973382020-11-18 19:12:19 -06001855 netif_wake_subqueue(adapter->netdev, queue_num);
1856 netdev_dbg(adapter->netdev, "Started queue %d\n",
1857 queue_num);
1858 }
1859}
1860
1861static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1862 struct ibmvnic_sub_crq_queue *tx_scrq)
1863{
1864 struct ibmvnic_ind_xmit_queue *ind_bufp;
1865 u64 dma_addr;
1866 u64 entries;
1867 u64 handle;
1868 int rc;
1869
1870 ind_bufp = &tx_scrq->ind_buf;
1871 dma_addr = (u64)ind_bufp->indir_dma;
1872 entries = (u64)ind_bufp->index;
1873 handle = tx_scrq->handle;
1874
1875 if (!entries)
1876 return 0;
1877 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1878 if (rc)
1879 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1880 else
1881 ind_bufp->index = 0;
1882 return 0;
1883}
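
/* Illustrative note (not part of the driver): the ind_buf above batches
 * tx descriptors so several skbs can be handed to the hypervisor with a
 * single send_subcrq_indirect() call instead of one hcall per
 * descriptor. ibmvnic_xmit() flushes early when a packet's descriptors
 * would not fit (the IBMVNIC_MAX_IND_DESCS check below) and otherwise
 * defers the flush until netdev_xmit_more() reports nothing else queued.
 */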
1884
YueHaibing94b2bb22018-09-18 14:35:47 +08001885static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001886{
1887 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1888 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001889 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001890 struct device *dev = &adapter->vdev->dev;
Thomas Falcon0d973382020-11-18 19:12:19 -06001891 struct ibmvnic_ind_xmit_queue *ind_bufp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001892 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001893 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001894 struct ibmvnic_tx_pool *tx_pool;
1895 unsigned int tx_send_failed = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001896 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001897 unsigned int tx_map_failed = 0;
Thomas Falconc62aa372020-11-18 19:12:20 -06001898 union sub_crq indir_arr[16];
Thomas Falcon032c5e82015-12-21 11:26:06 -06001899 unsigned int tx_dropped = 0;
1900 unsigned int tx_packets = 0;
1901 unsigned int tx_bytes = 0;
1902 dma_addr_t data_dma_addr;
1903 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001904 unsigned long lpar_rc;
1905 union sub_crq tx_crq;
1906 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001907 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001908 unsigned char *dst;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001909 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001910 u8 proto = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001911
1912 tx_scrq = adapter->tx_scrq[queue_num];
1913 txq = netdev_get_tx_queue(netdev, queue_num);
1914 ind_bufp = &tx_scrq->ind_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001915
Juliet Kim7ed5b312019-09-20 16:11:23 -04001916 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001917 dev_kfree_skb_any(skb);
1918
Thomas Falcon032c5e82015-12-21 11:26:06 -06001919 tx_send_failed++;
1920 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001921 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001922 goto out;
1923 }
1924
Thomas Falcon7083a452018-03-12 21:05:26 -05001925 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001926 tx_dropped++;
1927 tx_send_failed++;
1928 ret = NETDEV_TX_OK;
Thomas Falcon0d973382020-11-18 19:12:19 -06001929 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon1f247a62018-03-12 11:51:04 -05001930 goto out;
1931 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001932 if (skb_is_gso(skb))
1933 tx_pool = &adapter->tso_pool[queue_num];
1934 else
1935 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001936
Thomas Falcon032c5e82015-12-21 11:26:06 -06001937 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001938
Thomas Falcon86b61a52018-03-16 20:00:29 -05001939 if (index == IBMVNIC_INVALID_MAP) {
1940 dev_kfree_skb_any(skb);
1941 tx_send_failed++;
1942 tx_dropped++;
Sukadev Bhattiprolubb553622021-07-20 19:34:39 -07001943 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon86b61a52018-03-16 20:00:29 -05001944 ret = NETDEV_TX_OK;
1945 goto out;
1946 }
1947
1948 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1949
Thomas Falcon06b3e352018-03-16 20:00:28 -05001950 offset = index * tx_pool->buf_size;
1951 dst = tx_pool->long_term_buff.buff + offset;
1952 memset(dst, 0, tx_pool->buf_size);
1953 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001954
Thomas Falcon15482052017-10-17 12:36:54 -05001955 if (skb_shinfo(skb)->nr_frags) {
1956 int cur, i;
1957
1958 /* Copy the head */
1959 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1960 cur = skb_headlen(skb);
1961
1962 /* Copy the frags */
1963 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1964 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1965
Christophe JAILLETc3105f82021-04-04 10:54:37 +02001966 memcpy(dst + cur, skb_frag_address(frag),
1967 skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001968 cur += skb_frag_size(frag);
1969 }
1970 } else {
1971 skb_copy_from_linear_data(skb, dst, skb->len);
1972 }
1973
Lijun Pan42557da2021-02-12 20:48:40 -06001974 /* post changes to long_term_buff *dst before the VIOS accesses it */
1975 dma_wmb();
1976
Thomas Falcon032c5e82015-12-21 11:26:06 -06001977 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001978 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001979
1980 tx_buff = &tx_pool->tx_buff[index];
1981 tx_buff->skb = skb;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001982 tx_buff->index = index;
1983 tx_buff->pool_index = queue_num;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001984
1985 memset(&tx_crq, 0, sizeof(tx_crq));
1986 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1987 tx_crq.v1.type = IBMVNIC_TX_DESC;
1988 tx_crq.v1.n_crq_elem = 1;
1989 tx_crq.v1.n_sge = 1;
1990 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001991
Thomas Falconfdb06102017-10-17 12:36:55 -05001992 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001993 tx_crq.v1.correlator =
1994 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001995 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001996 tx_crq.v1.correlator = cpu_to_be32(index);
1997 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001998 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1999 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
2000
Michał Mirosławe84b4792018-11-07 17:50:52 +01002001 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002002 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
2003 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
2004 }
2005
2006 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06002007 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
2008 proto = ip_hdr(skb)->protocol;
2009 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2010 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
2011 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002012 }
2013
Thomas Falcona0dca102018-01-18 19:29:48 -06002014 if (proto == IPPROTO_TCP)
2015 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
2016 else if (proto == IPPROTO_UDP)
2017 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
2018
Thomas Falconad7775d2016-04-01 17:20:34 -05002019 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002020 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05002021 hdrs += 2;
2022 }
Thomas Falconfdb06102017-10-17 12:36:55 -05002023 if (skb_is_gso(skb)) {
2024 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
2025 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
2026 hdrs += 2;
2027 }
Thomas Falcon0d973382020-11-18 19:12:19 -06002028
2029 if ((*hdrs >> 7) & 1)
Thomas Falconc62aa372020-11-18 19:12:20 -06002030 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
Thomas Falcon0d973382020-11-18 19:12:19 -06002031
2032 tx_crq.v1.n_crq_elem = num_entries;
2033 tx_buff->num_entries = num_entries;
 2034 /* flush buffer if current entry cannot fit */
2035 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2036 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2037 if (lpar_rc != H_SUCCESS)
2038 goto tx_flush_err;
Thomas Falconad7775d2016-04-01 17:20:34 -05002039 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04002040
Thomas Falconc62aa372020-11-18 19:12:20 -06002041 indir_arr[0] = tx_crq;
2042 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
Thomas Falcon0d973382020-11-18 19:12:19 -06002043 num_entries * sizeof(struct ibmvnic_generic_scrq));
2044 ind_bufp->index += num_entries;
2045 if (__netdev_tx_sent_queue(txq, skb->len,
2046 netdev_xmit_more() &&
2047 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
2048 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
2049 if (lpar_rc != H_SUCCESS)
2050 goto tx_err;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002051 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06002052
Thomas Falconffc385b2018-02-18 10:08:41 -06002053 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04002054 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06002055 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06002056 netif_stop_subqueue(netdev, queue_num);
2057 }
2058
Thomas Falcon032c5e82015-12-21 11:26:06 -06002059 tx_packets++;
2060 tx_bytes += skb->len;
2061 txq->trans_start = jiffies;
2062 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05002063 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002064
Thomas Falcon0d973382020-11-18 19:12:19 -06002065tx_flush_err:
2066 dev_kfree_skb_any(skb);
2067 tx_buff->skb = NULL;
2068 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
2069 tx_pool->num_buffers - 1 :
2070 tx_pool->consumer_index - 1;
2071 tx_dropped++;
2072tx_err:
2073 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
2074 dev_err_ratelimited(dev, "tx: send failed\n");
2075
2076 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
2077 /* Disable TX and report carrier off if queue is closed
2078 * or pending failover.
2079 * Firmware guarantees that a signal will be sent to the
2080 * driver, triggering a reset or some other action.
2081 */
2082 netif_tx_stop_all_queues(netdev);
2083 netif_carrier_off(netdev);
2084 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002085out:
2086 netdev->stats.tx_dropped += tx_dropped;
2087 netdev->stats.tx_bytes += tx_bytes;
2088 netdev->stats.tx_packets += tx_packets;
2089 adapter->tx_send_failed += tx_send_failed;
2090 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05002091 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
2092 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
2093 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002094
2095 return ret;
2096}
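
/* Illustrative sketch (not part of the driver): the ordering enforced by
 * the dma_wmb() in ibmvnic_xmit() above. Without it, the device could
 * observe the descriptor published by the hcall before the packet bytes
 * copied into the long-term buffer:
 *
 *	skb_copy_from_linear_data(skb, dst, skb->len); // payload into LTB
 *	dma_wmb();		      // make the payload visible first ...
 *	send_subcrq_indirect(...);    // ... then publish the descriptor
 */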
2097
2098static void ibmvnic_set_multi(struct net_device *netdev)
2099{
2100 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2101 struct netdev_hw_addr *ha;
2102 union ibmvnic_crq crq;
2103
2104 memset(&crq, 0, sizeof(crq));
2105 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2106 crq.request_capability.cmd = REQUEST_CAPABILITY;
2107
2108 if (netdev->flags & IFF_PROMISC) {
2109 if (!adapter->promisc_supported)
2110 return;
2111 } else {
2112 if (netdev->flags & IFF_ALLMULTI) {
2113 /* Accept all multicast */
2114 memset(&crq, 0, sizeof(crq));
2115 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2116 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2117 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
2118 ibmvnic_send_crq(adapter, &crq);
2119 } else if (netdev_mc_empty(netdev)) {
2120 /* Reject all multicast */
2121 memset(&crq, 0, sizeof(crq));
2122 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2123 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2124 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
2125 ibmvnic_send_crq(adapter, &crq);
2126 } else {
2127 /* Accept one or more multicast(s) */
2128 netdev_for_each_mc_addr(ha, netdev) {
2129 memset(&crq, 0, sizeof(crq));
2130 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
2131 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
2132 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
2133 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
2134 ha->addr);
2135 ibmvnic_send_crq(adapter, &crq);
2136 }
2137 }
2138 }
2139}
2140
Thomas Falcon62740e92019-05-09 23:13:43 -05002141static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002142{
2143 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002144 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002145 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002146
Thomas Falcon62740e92019-05-09 23:13:43 -05002147 if (!is_valid_ether_addr(dev_addr)) {
2148 rc = -EADDRNOTAVAIL;
2149 goto err;
2150 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002151
2152 memset(&crq, 0, sizeof(crq));
2153 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2154 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05002155 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06002156
Thomas Falconff25dcb2019-11-25 17:12:56 -06002157 mutex_lock(&adapter->fw_lock);
2158 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06002159 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06002160
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002161 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05002162 if (rc) {
2163 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06002164 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002165 goto err;
2166 }
2167
Thomas Falcon476d96c2019-11-25 17:12:55 -06002168 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002169 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06002170 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05002171 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06002172 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002173 goto err;
2174 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06002175 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002176 return 0;
2177err:
2178 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2179 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002180}
2181
John Allenc26eba02017-10-26 16:23:25 -05002182static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2183{
2184 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2185 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06002186 int rc;
John Allenc26eba02017-10-26 16:23:25 -05002187
Thomas Falcon62740e92019-05-09 23:13:43 -05002188 rc = 0;
Lijun Pan8fc36722020-10-27 17:04:56 -05002189 if (!is_valid_ether_addr(addr->sa_data))
2190 return -EADDRNOTAVAIL;
2191
Jiri Wiesner67eb2112021-03-04 17:18:28 +01002192 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2193 if (adapter->state != VNIC_PROBED)
Thomas Falcon62740e92019-05-09 23:13:43 -05002194 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
John Allenc26eba02017-10-26 16:23:25 -05002195
Thomas Falconf8136142018-01-29 13:45:05 -06002196 return rc;
John Allenc26eba02017-10-26 16:23:25 -05002197}
2198
Lijun Pancaee7bf2021-04-12 02:41:27 -05002199static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2200{
2201 switch (reason) {
2202 case VNIC_RESET_FAILOVER:
2203 return "FAILOVER";
2204 case VNIC_RESET_MOBILITY:
2205 return "MOBILITY";
2206 case VNIC_RESET_FATAL:
2207 return "FATAL";
2208 case VNIC_RESET_NON_FATAL:
2209 return "NON_FATAL";
2210 case VNIC_RESET_TIMEOUT:
2211 return "TIMEOUT";
2212 case VNIC_RESET_CHANGE_PARAM:
2213 return "CHANGE_PARAM";
Lijun Pan822ebc22021-06-11 10:35:37 -05002214 case VNIC_RESET_PASSIVE_INIT:
2215 return "PASSIVE_INIT";
Lijun Pancaee7bf2021-04-12 02:41:27 -05002216 }
Michal Suchanek07b5dc12021-05-20 08:50:34 +02002217 return "UNKNOWN";
Lijun Pancaee7bf2021-04-12 02:41:27 -05002218}
2219
Lee Jones80708602021-01-15 20:09:03 +00002220/*
Nathan Fontenoted651a12017-05-03 14:04:38 -04002221 * do_reset returns zero if we are able to keep processing reset events, or
2222 * non-zero if we hit a fatal error and must halt.
2223 */
2224static int do_reset(struct ibmvnic_adapter *adapter,
2225 struct ibmvnic_rwi *rwi, u32 reset_state)
2226{
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07002227 struct net_device *netdev = adapter->netdev;
John Allen896d8692018-01-18 16:26:31 -06002228 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002229 u64 old_num_rx_slots, old_num_tx_slots;
Lijun Pand3a6abc2021-04-14 02:46:15 -05002230 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002231
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002232 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002233 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2234 adapter_state_to_string(adapter->state),
2235 adapter->failover_pending,
2236 reset_reason_to_string(rwi->reset_reason),
2237 adapter_state_to_string(reset_state));
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002238
Lijun Pan3f5ec372021-01-06 15:35:14 -06002239 adapter->reset_reason = rwi->reset_reason;
2240 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2241 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2242 rtnl_lock();
2243
Lijun Panbab08be2021-02-11 00:43:19 -06002244 /* Now that we have the rtnl lock, clear any pending failover.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002245 * This will ensure ibmvnic_open() has either completed or will
2246 * block until failover is complete.
2247 */
2248 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2249 adapter->failover_pending = false;
Juliet Kimb27507b2019-09-20 16:11:22 -04002250
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002251 /* read the state and check (again) after getting rtnl */
2252 reset_state = adapter->state;
2253
2254 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2255 rc = -EBUSY;
2256 goto out;
2257 }
2258
Nathan Fontenoted651a12017-05-03 14:04:38 -04002259 netif_carrier_off(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002260
John Allen896d8692018-01-18 16:26:31 -06002261 old_num_rx_queues = adapter->req_rx_queues;
2262 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002263 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2264 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06002265
Nathan Fontenot30f79622018-04-06 18:37:06 -05002266 ibmvnic_cleanup(netdev);
2267
Thomas Falcon1f946082019-06-07 16:03:53 -05002268 if (reset_state == VNIC_OPEN &&
2269 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05002270 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Lijun Pan3f5ec372021-01-06 15:35:14 -06002271 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2272 rc = __ibmvnic_close(netdev);
2273 if (rc)
2274 goto out;
2275 } else {
2276 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002277
Lijun Pan3f5ec372021-01-06 15:35:14 -06002278 /* Release the RTNL lock before link state change and
2279 * re-acquire after the link state change to allow
2280 * linkwatch_event to grab the RTNL lock and run during
2281 * a reset.
2282 */
2283 rtnl_unlock();
2284 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2285 rtnl_lock();
2286 if (rc)
2287 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04002288
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002289 if (adapter->state == VNIC_OPEN) {
2290 /* When we dropped rtnl, ibmvnic_open() got
2291 * it and noticed that we are resetting and
2292 * set the adapter state to OPEN. Update our
2293 * new "target" state, and resume the reset
2294 * from VNIC_CLOSING state.
2295 */
2296 netdev_dbg(netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002297 "Open changed state from %s, updating.\n",
2298 adapter_state_to_string(reset_state));
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002299 reset_state = VNIC_OPEN;
2300 adapter->state = VNIC_CLOSING;
2301 }
2302
Lijun Pan3f5ec372021-01-06 15:35:14 -06002303 if (adapter->state != VNIC_CLOSING) {
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002304 /* If someone else changed the adapter state
2305 * when we dropped the rtnl, fail the reset
2306 */
Lijun Pan3f5ec372021-01-06 15:35:14 -06002307 rc = -1;
2308 goto out;
2309 }
Lijun Pan3f5ec372021-01-06 15:35:14 -06002310 adapter->state = VNIC_CLOSED;
Juliet Kimb27507b2019-09-20 16:11:22 -04002311 }
Lijun Pan3f5ec372021-01-06 15:35:14 -06002312 }
Juliet Kimb27507b2019-09-20 16:11:22 -04002313
Lijun Pan3f5ec372021-01-06 15:35:14 -06002314 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2315 release_resources(adapter);
2316 release_sub_crqs(adapter, 1);
2317 release_crq_queue(adapter);
John Allenc26eba02017-10-26 16:23:25 -05002318 }
2319
John Allen8cb31cf2017-05-26 10:30:37 -04002320 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2321 /* remove the closed state so when we call open it appears
2322 * we are coming from the probed state.
2323 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002324 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04002325
Lijun Pan3f5ec372021-01-06 15:35:14 -06002326 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2327 rc = init_crq_queue(adapter);
2328 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002329 rc = ibmvnic_reenable_crq_queue(adapter);
2330 release_sub_crqs(adapter, 1);
2331 } else {
2332 rc = ibmvnic_reset_crq(adapter);
Dany Madden8b40eb732020-06-18 15:24:13 -04002333 if (rc == H_CLOSED || rc == H_SUCCESS) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002334 rc = vio_enable_interrupts(adapter->vdev);
Dany Madden8b40eb732020-06-18 15:24:13 -04002335 if (rc)
2336 netdev_err(adapter->netdev,
2337 "Reset failed to enable interrupts. rc=%d\n",
2338 rc);
2339 }
Nathan Fontenot30f79622018-04-06 18:37:06 -05002340 }
2341
2342 if (rc) {
2343 netdev_err(adapter->netdev,
Dany Madden8b40eb732020-06-18 15:24:13 -04002344 "Reset couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002345 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05002346 }
2347
Lijun Pan635e4422020-08-19 17:52:26 -05002348 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04002349 if (rc) {
2350 rc = IBMVNIC_INIT_FAILED;
2351 goto out;
2352 }
John Allen8cb31cf2017-05-26 10:30:37 -04002353
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002354 /* If the adapter was in PROBE or DOWN state prior to the reset,
John Allen8cb31cf2017-05-26 10:30:37 -04002355 * exit here.
2356 */
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002357 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002358 rc = 0;
2359 goto out;
2360 }
John Allen8cb31cf2017-05-26 10:30:37 -04002361
2362 rc = ibmvnic_login(netdev);
Lijun Panf78afaa2021-02-11 00:43:20 -06002363 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002364 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04002365
Lijun Pan3f5ec372021-01-06 15:35:14 -06002366 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2367 rc = init_resources(adapter);
2368 if (rc)
2369 goto out;
2370 } else if (adapter->req_rx_queues != old_num_rx_queues ||
Juliet Kimb27507b2019-09-20 16:11:22 -04002371 adapter->req_tx_queues != old_num_tx_queues ||
2372 adapter->req_rx_add_entries_per_subcrq !=
2373 old_num_rx_slots ||
2374 adapter->req_tx_entries_per_subcrq !=
Mingming Cao9f134572020-08-25 13:26:41 -04002375 old_num_tx_slots ||
2376 !adapter->rx_pool ||
2377 !adapter->tso_pool ||
2378 !adapter->tx_pool) {
Juliet Kima5681e22018-11-19 15:59:22 -06002379 release_napi(adapter);
2380 release_vpd_data(adapter);
2381
2382 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002383 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002384 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002385
John Allenc26eba02017-10-26 16:23:25 -05002386 } else {
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07002387 rc = init_tx_pools(netdev);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002388 if (rc) {
Sukadev Bhattiprolubbd80932021-09-14 20:52:59 -07002389 netdev_dbg(netdev,
2390 "init tx pools failed (%d)\n",
Lijun Pan91dc5d22021-02-11 00:43:22 -06002391 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002392 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002393 }
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002394
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07002395 rc = init_rx_pools(netdev);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002396 if (rc) {
Sukadev Bhattiprolu489de952021-09-14 20:52:58 -07002397 netdev_dbg(netdev,
2398 "init rx pools failed (%d)\n",
Lijun Pan91dc5d22021-02-11 00:43:22 -06002399 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002400 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002401 }
John Allenc26eba02017-10-26 16:23:25 -05002402 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002403 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002404 }
John Allene676d812018-03-14 10:41:29 -05002405 adapter->state = VNIC_CLOSED;
2406
Juliet Kimb27507b2019-09-20 16:11:22 -04002407 if (reset_state == VNIC_CLOSED) {
2408 rc = 0;
2409 goto out;
2410 }
John Allene676d812018-03-14 10:41:29 -05002411
Nathan Fontenoted651a12017-05-03 14:04:38 -04002412 rc = __ibmvnic_open(netdev);
2413 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002414 rc = IBMVNIC_OPEN_FAILED;
2415 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002416 }
2417
Thomas Falconbe32a242019-06-07 16:03:54 -05002418 /* refresh device's multicast list */
2419 ibmvnic_set_multi(netdev);
2420
Lijun Pan98025bc2020-11-20 16:40:12 -06002421 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
Lijun Pan6be46662020-12-14 15:19:29 -06002422 adapter->reset_reason == VNIC_RESET_MOBILITY)
2423 __netdev_notify_peers(netdev);
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002424
Juliet Kimb27507b2019-09-20 16:11:22 -04002425 rc = 0;
2426
2427out:
Dany Madden0cb4bc62020-11-25 18:04:27 -06002428 /* restore the adapter state if reset failed */
2429 if (rc)
2430 adapter->state = reset_state;
Lijun Pan3f5ec372021-01-06 15:35:14 -06002431 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2432 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2433 rtnl_unlock();
Juliet Kimb27507b2019-09-20 16:11:22 -04002434
Lijun Pan0666ef72021-04-12 02:41:28 -05002435 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2436 adapter_state_to_string(adapter->state),
2437 adapter->failover_pending, rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002438 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002439}
2440
Thomas Falcon2770a792018-05-23 13:38:02 -05002441static int do_hard_reset(struct ibmvnic_adapter *adapter,
2442 struct ibmvnic_rwi *rwi, u32 reset_state)
2443{
2444 struct net_device *netdev = adapter->netdev;
2445 int rc;
2446
Lijun Pancaee7bf2021-04-12 02:41:27 -05002447 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2448 reset_reason_to_string(rwi->reset_reason));
Thomas Falcon2770a792018-05-23 13:38:02 -05002449
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002450 /* read the state and check (again) after getting rtnl */
2451 reset_state = adapter->state;
2452
2453 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2454 rc = -EBUSY;
2455 goto out;
2456 }
2457
Thomas Falcon2770a792018-05-23 13:38:02 -05002458 netif_carrier_off(netdev);
2459 adapter->reset_reason = rwi->reset_reason;
2460
2461 ibmvnic_cleanup(netdev);
2462 release_resources(adapter);
2463 release_sub_crqs(adapter, 0);
2464 release_crq_queue(adapter);
2465
2466 /* remove the closed state so when we call open it appears
2467 * we are coming from the probed state.
2468 */
2469 adapter->state = VNIC_PROBED;
2470
Thomas Falconbbd669a2019-04-04 18:58:26 -05002471 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002472 rc = init_crq_queue(adapter);
2473 if (rc) {
2474 netdev_err(adapter->netdev,
2475 "Couldn't initialize crq. rc=%d\n", rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002476 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002477 }
2478
Lijun Pan635e4422020-08-19 17:52:26 -05002479 rc = ibmvnic_reset_init(adapter, false);
Thomas Falcon2770a792018-05-23 13:38:02 -05002480 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002481 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002482
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002483 /* If the adapter was in PROBE or DOWN state prior to the reset,
Thomas Falcon2770a792018-05-23 13:38:02 -05002484 * exit here.
2485 */
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002486 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002487 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002488
2489 rc = ibmvnic_login(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002490 if (rc)
2491 goto out;
Juliet Kima5681e22018-11-19 15:59:22 -06002492
2493 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002494 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002495 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002496
2497 ibmvnic_disable_irqs(adapter);
2498 adapter->state = VNIC_CLOSED;
2499
2500 if (reset_state == VNIC_CLOSED)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002501 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002502
2503 rc = __ibmvnic_open(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002504 if (rc) {
2505 rc = IBMVNIC_OPEN_FAILED;
2506 goto out;
2507 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002508
Lijun Pan6be46662020-12-14 15:19:29 -06002509 __netdev_notify_peers(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002510out:
2511 /* restore adapter state if reset failed */
2512 if (rc)
2513 adapter->state = reset_state;
Lijun Pan0666ef72021-04-12 02:41:28 -05002514 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2515 adapter_state_to_string(adapter->state),
2516 adapter->failover_pending, rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002517 return rc;
Thomas Falcon2770a792018-05-23 13:38:02 -05002518}
2519
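/* Dequeue the next reset work item, if any. The rwi_list is protected by
 * rwi_lock; the caller owns the returned rwi and must kfree() it.
 */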
Nathan Fontenoted651a12017-05-03 14:04:38 -04002520static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2521{
2522 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002523 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002524
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002525 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002526
2527 if (!list_empty(&adapter->rwi_list)) {
2528 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2529 list);
2530 list_del(&rwi->list);
2531 } else {
2532 rwi = NULL;
2533 }
2534
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002535 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002536 return rwi;
2537}
2538
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002539/**
2540 * do_passive_init - complete probing when partner device is detected.
2541 * @adapter: ibmvnic_adapter struct
2542 *
2543 * If the ibmvnic device has no partner device to communicate with at boot
2544 * and that partner device comes online at a later time, this function is
2545 * called to complete the initialization process of the ibmvnic device.
2546 * Caller is expected to hold rtnl_lock().
2547 *
2548 * Returns non-zero if the sub-CRQs could not be initialized, leaving the
2549 * device in the DOWN state.
2550 * Returns 0 on success, leaving the device in the PROBED state.
2551 */
2553static int do_passive_init(struct ibmvnic_adapter *adapter)
2554{
2555 unsigned long timeout = msecs_to_jiffies(30000);
2556 struct net_device *netdev = adapter->netdev;
2557 struct device *dev = &adapter->vdev->dev;
2558 int rc;
2559
2560 netdev_dbg(netdev, "Partner device found, probing.\n");
2561
2562 adapter->state = VNIC_PROBING;
2563 reinit_completion(&adapter->init_done);
2564 adapter->init_done_rc = 0;
2565 adapter->crq.active = true;
2566
2567 rc = send_crq_init_complete(adapter);
2568 if (rc)
2569 goto out;
2570
2571 rc = send_version_xchg(adapter);
2572 if (rc)
2573 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2574
2575 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2576 dev_err(dev, "Initialization sequence timed out\n");
2577 rc = -ETIMEDOUT;
2578 goto out;
2579 }
2580
2581 rc = init_sub_crqs(adapter);
2582 if (rc) {
2583 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2584 goto out;
2585 }
2586
2587 rc = init_sub_crq_irqs(adapter);
2588 if (rc) {
2589		dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
2590 goto init_failed;
2591 }
2592
2593 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2594 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2595 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2596
2597 adapter->state = VNIC_PROBED;
2598 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2599
2600 return 0;
2601
2602init_failed:
2603 release_sub_crqs(adapter, 1);
2604out:
2605 adapter->state = VNIC_DOWN;
2606 return rc;
2607}
2608
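/* Reset worker: drain the rwi_list and perform each queued reset in turn.
 * Only one instance runs at a time; if the resetting bit is already held,
 * the work is requeued via the delayed-reset path and retried later.
 */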
Nathan Fontenoted651a12017-05-03 14:04:38 -04002609static void __ibmvnic_reset(struct work_struct *work)
2610{
Nathan Fontenoted651a12017-05-03 14:04:38 -04002611 struct ibmvnic_adapter *adapter;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002612 bool saved_state = false;
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002613 struct ibmvnic_rwi *tmprwi;
2614 struct ibmvnic_rwi *rwi;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002615 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002616 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05002617 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002618
2619 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002620
Juliet Kim7ed5b312019-09-20 16:11:23 -04002621 if (test_and_set_bit_lock(0, &adapter->resetting)) {
Lijun Pan870e04a2021-04-13 14:33:39 -05002622 queue_delayed_work(system_long_wq,
2623 &adapter->ibmvnic_delayed_reset,
2624 IBMVNIC_RESET_DELAY);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002625 return;
2626 }
2627
Nathan Fontenoted651a12017-05-03 14:04:38 -04002628 rwi = get_next_rwi(adapter);
2629 while (rwi) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002630 spin_lock_irqsave(&adapter->state_lock, flags);
2631
Thomas Falcon36f10312019-08-27 11:10:04 -05002632 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002633 adapter->state == VNIC_REMOVED) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002634 spin_unlock_irqrestore(&adapter->state_lock, flags);
Juliet Kim1c2977c2019-09-05 17:30:01 -04002635 kfree(rwi);
2636 rc = EBUSY;
2637 break;
2638 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002639
Juliet Kim7d7195a2020-03-10 09:23:58 -05002640 if (!saved_state) {
2641 reset_state = adapter->state;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002642 saved_state = true;
2643 }
2644 spin_unlock_irqrestore(&adapter->state_lock, flags);
2645
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002646 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2647 rtnl_lock();
2648 rc = do_passive_init(adapter);
2649 rtnl_unlock();
2650 if (!rc)
2651 netif_carrier_on(adapter->netdev);
2652 } else if (adapter->force_reset_recovery) {
Lijun Panbab08be2021-02-11 00:43:19 -06002653 /* Since we are doing a hard reset now, clear the
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002654 * failover_pending flag so we don't ignore any
2655 * future MOBILITY or other resets.
2656 */
2657 adapter->failover_pending = false;
2658
Juliet Kimb27507b2019-09-20 16:11:22 -04002659 /* Transport event occurred during previous reset */
2660 if (adapter->wait_for_reset) {
2661 /* Previous was CHANGE_PARAM; caller locked */
2662 adapter->force_reset_recovery = false;
2663 rc = do_hard_reset(adapter, rwi, reset_state);
2664 } else {
2665 rtnl_lock();
2666 adapter->force_reset_recovery = false;
2667 rc = do_hard_reset(adapter, rwi, reset_state);
2668 rtnl_unlock();
2669 }
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002670 if (rc) {
2671 /* give backing device time to settle down */
2672 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002673 "[S:%s] Hard reset failed, waiting 60 secs\n",
2674 adapter_state_to_string(adapter->state));
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002675 set_current_state(TASK_UNINTERRUPTIBLE);
2676 schedule_timeout(60 * HZ);
2677 }
Lijun Pan1f45dc22020-12-23 14:49:04 -06002678 } else {
Thomas Falcon2770a792018-05-23 13:38:02 -05002679 rc = do_reset(adapter, rwi, reset_state);
2680 }
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002681 tmprwi = rwi;
Dany Maddena86d5c62020-11-25 18:04:31 -06002682 adapter->last_reset_time = jiffies;
Dany Madden0cb4bc62020-11-25 18:04:27 -06002683
Dany Madden18f141b2020-11-25 18:04:25 -06002684 if (rc)
2685 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002686
2687 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002688
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002689	/* If there is another reset queued, free the previous rwi
2691 * and process the new reset even if previous reset failed
2692 * (the previous reset could have failed because of a fail
2693 * over for instance, so process the fail over).
2694 *
2695 * If there are no resets queued and the previous reset failed,
2696 * the adapter would be in an undefined state. So retry the
2697 * previous reset as a hard reset.
2698 */
2699 if (rwi)
2700 kfree(tmprwi);
2701 else if (rc)
2702 rwi = tmprwi;
2703
Juliet Kim7ed5b312019-09-20 16:11:23 -04002704 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002705 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
Juliet Kim7ed5b312019-09-20 16:11:23 -04002706 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002707 }
2708
John Allenc26eba02017-10-26 16:23:25 -05002709 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002710 adapter->reset_done_rc = rc;
2711 complete(&adapter->reset_done);
2712 }
2713
Juliet Kim7ed5b312019-09-20 16:11:23 -04002714 clear_bit_unlock(0, &adapter->resetting);
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002715
2716 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002717 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2718 adapter_state_to_string(adapter->state),
2719 adapter->force_reset_recovery,
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002720 adapter->wait_for_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002721}
2722
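/* Delayed-work wrapper that re-enters __ibmvnic_reset once the
 * IBMVNIC_RESET_DELAY backoff has elapsed.
 */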
2723static void __ibmvnic_delayed_reset(struct work_struct *work)
2724{
2725 struct ibmvnic_adapter *adapter;
2726
2727 adapter = container_of(work, struct ibmvnic_adapter,
2728 ibmvnic_delayed_reset.work);
2729 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002730}
2731
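/* Queue a reset work item for the given reason and schedule the reset
 * worker. Duplicate reasons already on the list are dropped, and only a
 * failover reset may be scheduled while a failover is pending. Returns 0
 * on success or a negative errno value.
 */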
Thomas Falconaf894d22018-04-06 18:37:04 -05002732static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2733 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002734{
Thomas Falcon2770a792018-05-23 13:38:02 -05002735 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002736 struct ibmvnic_rwi *rwi, *tmp;
2737 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002738 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002739 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002740
Jakub Kicinskib646acd52021-02-16 22:58:44 -08002741 spin_lock_irqsave(&adapter->rwi_lock, flags);
2742
2743 /* If failover is pending don't schedule any other reset.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002744	 * Instead let the failover complete. If there is already a
2745	 * failover reset scheduled, we will detect and drop the
2746 * duplicate reset when walking the ->rwi_list below.
2747 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002748 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002749 adapter->state == VNIC_REMOVED ||
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002750 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002751 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002752 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002753 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002754 }
2755
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002756 if (adapter->state == VNIC_PROBING) {
2757 netdev_warn(netdev, "Adapter reset during probe\n");
Sukadev Bhattiprolu6b278c02021-10-29 15:03:16 -07002758 adapter->init_done_rc = -EAGAIN;
Lijun Pan91dc5d22021-02-11 00:43:22 -06002759 ret = EAGAIN;
Thomas Falconaf894d22018-04-06 18:37:04 -05002760 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002761 }
2762
Wang Hai3e98ae02021-06-10 20:54:17 +08002763 list_for_each_entry(tmp, &adapter->rwi_list, list) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04002764 if (tmp->reset_reason == reason) {
Lijun Pancaee7bf2021-04-12 02:41:27 -05002765 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2766 reset_reason_to_string(reason));
Thomas Falconaf894d22018-04-06 18:37:04 -05002767 ret = EBUSY;
2768 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002769 }
2770 }
2771
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002772 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002773 if (!rwi) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002774 ret = ENOMEM;
2775 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002776 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002777 /* if we just received a transport event,
2778 * flush reset queue and process this reset
2779 */
2780 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2781 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2782 list_del(entry);
2783 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002784 rwi->reset_reason = reason;
2785 list_add_tail(&rwi->list, &adapter->rwi_list);
Lijun Pancaee7bf2021-04-12 02:41:27 -05002786 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2787 reset_reason_to_string(reason));
Lijun Pan870e04a2021-04-13 14:33:39 -05002788 queue_work(system_long_wq, &adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002789
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08002790 ret = 0;
Thomas Falconaf894d22018-04-06 18:37:04 -05002791err:
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08002792 /* ibmvnic_close() below can block, so drop the lock first */
2793 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2794
2795 if (ret == ENOMEM)
2796 ibmvnic_close(netdev);
2797
Thomas Falconaf894d22018-04-06 18:37:04 -05002798 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002799}
2800
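/* .ndo_tx_timeout handler: schedule a TIMEOUT reset unless a reset is
 * already running or the watchdog fired within watchdog_timeo of the
 * last reset.
 */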
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05002801static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002802{
2803 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002804
Lijun Pan855a6312020-11-20 16:40:13 -06002805 if (test_bit(0, &adapter->resetting)) {
2806 netdev_err(adapter->netdev,
2807 "Adapter is resetting, skip timeout reset\n");
2808 return;
2809 }
Dany Maddena86d5c62020-11-25 18:04:31 -06002810	/* Do not queue a reset until at least 5 seconds (the default
2811	 * watchdog value) have passed since the last reset.
2812	 */
2813 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2814		netdev_dbg(dev, "Not yet time to handle tx timeout\n");
2815 return;
2816 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002817 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002818}
2819
2820static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2821 struct ibmvnic_rx_buff *rx_buff)
2822{
2823 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2824
2825 rx_buff->skb = NULL;
2826
2827 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2828 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2829
2830 atomic_dec(&pool->available);
2831}
2832
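/* NAPI poll: drain up to budget completed entries from the rx sub-CRQ
 * into skbs, replenish the rx pool when it runs low, and re-enable the
 * queue interrupt once polling is complete.
 */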
2833static int ibmvnic_poll(struct napi_struct *napi, int budget)
2834{
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002835 struct ibmvnic_sub_crq_queue *rx_scrq;
2836 struct ibmvnic_adapter *adapter;
2837 struct net_device *netdev;
2838 int frames_processed;
2839 int scrq_num;
2840
2841 netdev = napi->dev;
2842 adapter = netdev_priv(netdev);
2843 scrq_num = (int)(napi - adapter->napi);
2844 frames_processed = 0;
2845 rx_scrq = adapter->rx_scrq[scrq_num];
Nathan Fontenot152ce472017-05-26 10:30:54 -04002846
Thomas Falcon032c5e82015-12-21 11:26:06 -06002847restart_poll:
2848 while (frames_processed < budget) {
2849 struct sk_buff *skb;
2850 struct ibmvnic_rx_buff *rx_buff;
2851 union sub_crq *next;
2852 u32 length;
2853 u16 offset;
2854 u8 flags = 0;
2855
Juliet Kim7ed5b312019-09-20 16:11:23 -04002856 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002857 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002858 enable_scrq_irq(adapter, rx_scrq);
Thomas Falcon21ecba62017-06-14 23:50:09 -05002859 napi_complete_done(napi, frames_processed);
2860 return frames_processed;
2861 }
2862
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002863 if (!pending_scrq(adapter, rx_scrq))
Thomas Falcon032c5e82015-12-21 11:26:06 -06002864 break;
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002865 next = ibmvnic_next_scrq(adapter, rx_scrq);
Lijun Pan914789a2021-02-11 00:43:21 -06002866 rx_buff = (struct ibmvnic_rx_buff *)
2867 be64_to_cpu(next->rx_comp.correlator);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002868 /* do error checking */
2869 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002870 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2871 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002872 /* free the entry */
2873 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002874 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002875 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002876 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002877 } else if (!rx_buff->skb) {
2878 /* free the entry */
2879 next->rx_comp.first = 0;
2880 remove_buff_from_pool(adapter, rx_buff);
2881 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002882 }
2883
2884 length = be32_to_cpu(next->rx_comp.len);
2885 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2886 flags = next->rx_comp.flags;
2887 skb = rx_buff->skb;
Lijun Pan42557da2021-02-12 20:48:40 -06002888 /* load long_term_buff before copying to skb */
2889 dma_rmb();
Thomas Falcon032c5e82015-12-21 11:26:06 -06002890 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2891 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002892
2893 /* VLAN Header has been stripped by the system firmware and
2894 * needs to be inserted by the driver
2895 */
2896 if (adapter->rx_vlan_header_insertion &&
2897 (flags & IBMVNIC_VLAN_STRIPPED))
2898 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2899 ntohs(next->rx_comp.vlan_tci));
2900
Thomas Falcon032c5e82015-12-21 11:26:06 -06002901 /* free the entry */
2902 next->rx_comp.first = 0;
2903 remove_buff_from_pool(adapter, rx_buff);
2904
2905 skb_put(skb, length);
2906 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002907 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002908
2909 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2910 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2911 skb->ip_summed = CHECKSUM_UNNECESSARY;
2912 }
2913
2914 length = skb->len;
2915 napi_gro_receive(napi, skb); /* send it up */
2916 netdev->stats.rx_packets++;
2917 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002918 adapter->rx_stats_buffers[scrq_num].packets++;
2919 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002920 frames_processed++;
2921 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002922
Dwip N. Banerjee41ed0a02020-11-18 19:12:25 -06002923 if (adapter->state != VNIC_CLOSING &&
2924 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2925 adapter->req_rx_add_entries_per_subcrq / 2) ||
2926 frames_processed < budget))
Nathan Fontenot152ce472017-05-26 10:30:54 -04002927 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002928 if (frames_processed < budget) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002929 if (napi_complete_done(napi, frames_processed)) {
2930 enable_scrq_irq(adapter, rx_scrq);
2931 if (pending_scrq(adapter, rx_scrq)) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002932 if (napi_reschedule(napi)) {
2933 disable_scrq_irq(adapter, rx_scrq);
2934 goto restart_poll;
2935 }
2936 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002937 }
2938 }
2939 return frames_processed;
2940}
2941
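/* Trigger a CHANGE_PARAM reset and block (up to 60 seconds) until it
 * completes. If the reset fails, restore the saved fallback settings and
 * retry once with those values.
 */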
John Allenc26eba02017-10-26 16:23:25 -05002942static int wait_for_reset(struct ibmvnic_adapter *adapter)
2943{
Thomas Falconaf894d22018-04-06 18:37:04 -05002944 int rc, ret;
2945
John Allenc26eba02017-10-26 16:23:25 -05002946 adapter->fallback.mtu = adapter->req_mtu;
2947 adapter->fallback.rx_queues = adapter->req_rx_queues;
2948 adapter->fallback.tx_queues = adapter->req_tx_queues;
2949 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2950 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2951
Thomas Falcon070eca92019-11-25 17:12:53 -06002952 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002953 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002954 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002955
2956 if (rc) {
2957 ret = rc;
2958 goto out;
2959 }
2960 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2961 if (rc) {
2962 ret = -ENODEV;
2963 goto out;
2964 }
John Allenc26eba02017-10-26 16:23:25 -05002965
Thomas Falconaf894d22018-04-06 18:37:04 -05002966 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002967 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002968 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002969 adapter->desired.mtu = adapter->fallback.mtu;
2970 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2971 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2972 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2973 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2974
Thomas Falcon070eca92019-11-25 17:12:53 -06002975 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002976 adapter->wait_for_reset = true;
2977 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002978 if (rc) {
2979 ret = rc;
2980 goto out;
2981 }
2982 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2983 60000);
2984 if (rc) {
2985 ret = -ENODEV;
2986 goto out;
2987 }
John Allenc26eba02017-10-26 16:23:25 -05002988 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002989out:
John Allenc26eba02017-10-26 16:23:25 -05002990 adapter->wait_for_reset = false;
2991
Thomas Falconaf894d22018-04-06 18:37:04 -05002992 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002993}
2994
John Allen3a807b72017-06-06 16:55:52 -05002995static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2996{
John Allenc26eba02017-10-26 16:23:25 -05002997 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2998
2999 adapter->desired.mtu = new_mtu + ETH_HLEN;
3000
3001 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05003002}
3003
Thomas Falconf10b09e2018-03-12 11:51:05 -05003004static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3005 struct net_device *dev,
3006 netdev_features_t features)
3007{
3008	/* Some backing hardware adapters cannot
3009	 * handle packets with an MSS less than 224
3010	 * or with only one segment.
3011 */
3012 if (skb_is_gso(skb)) {
3013 if (skb_shinfo(skb)->gso_size < 224 ||
3014 skb_shinfo(skb)->gso_segs == 1)
3015 features &= ~NETIF_F_GSO_MASK;
3016 }
3017
3018 return features;
3019}
3020
Thomas Falcon032c5e82015-12-21 11:26:06 -06003021static const struct net_device_ops ibmvnic_netdev_ops = {
3022 .ndo_open = ibmvnic_open,
3023 .ndo_stop = ibmvnic_close,
3024 .ndo_start_xmit = ibmvnic_xmit,
3025 .ndo_set_rx_mode = ibmvnic_set_multi,
3026 .ndo_set_mac_address = ibmvnic_set_mac,
3027 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003028 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05003029 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05003030 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003031};
3032
3033/* ethtool functions */
3034
Philippe Reynes8a433792017-01-07 22:37:29 +01003035static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3036 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003037{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03003038 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3039 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01003040
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03003041 rc = send_query_phys_parms(adapter);
3042 if (rc) {
3043 adapter->speed = SPEED_UNKNOWN;
3044 adapter->duplex = DUPLEX_UNKNOWN;
3045 }
3046 cmd->base.speed = adapter->speed;
3047 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01003048 cmd->base.port = PORT_FIBRE;
3049 cmd->base.phy_address = 0;
3050 cmd->base.autoneg = AUTONEG_ENABLE;
3051
Thomas Falcon032c5e82015-12-21 11:26:06 -06003052 return 0;
3053}
3054
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003055static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003056 struct ethtool_drvinfo *info)
3057{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003058 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3059
Lijun Pan8a96c802021-02-11 00:43:25 -06003060 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3061 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3062 strscpy(info->fw_version, adapter->fw_version,
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003063 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003064}
3065
3066static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3067{
3068 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3069
3070 return adapter->msg_enable;
3071}
3072
3073static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3074{
3075 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3076
3077 adapter->msg_enable = data;
3078}
3079
3080static u32 ibmvnic_get_link(struct net_device *netdev)
3081{
3082 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3083
3084 /* Don't need to send a query because we request a logical link up at
3085 * init and then we wait for link state indications
3086 */
3087 return adapter->logical_link_state;
3088}
3089
3090static void ibmvnic_get_ringparam(struct net_device *netdev,
3091 struct ethtool_ringparam *ring)
3092{
John Allenbc131b32017-08-02 16:46:30 -05003093 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3094
Thomas Falcon723ad912018-09-28 18:38:26 -05003095 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3096 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3097 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3098 } else {
3099 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3100 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3101 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003102 ring->rx_mini_max_pending = 0;
3103 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05003104 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3105 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003106 ring->rx_mini_pending = 0;
3107 ring->rx_jumbo_pending = 0;
3108}
3109
John Allenc26eba02017-10-26 16:23:25 -05003110static int ibmvnic_set_ringparam(struct net_device *netdev,
3111 struct ethtool_ringparam *ring)
3112{
3113 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05003114 int ret;
John Allenc26eba02017-10-26 16:23:25 -05003115
Thomas Falcon723ad912018-09-28 18:38:26 -05003116 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05003117 adapter->desired.rx_entries = ring->rx_pending;
3118 adapter->desired.tx_entries = ring->tx_pending;
3119
Thomas Falcon723ad912018-09-28 18:38:26 -05003120 ret = wait_for_reset(adapter);
3121
3122 if (!ret &&
3123 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
3124 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
3125 netdev_info(netdev,
3126 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3127 ring->rx_pending, ring->tx_pending,
3128 adapter->req_rx_add_entries_per_subcrq,
3129 adapter->req_tx_entries_per_subcrq);
3130 return ret;
John Allenc26eba02017-10-26 16:23:25 -05003131}
3132
John Allenc2dbeb62017-08-02 16:47:17 -05003133static void ibmvnic_get_channels(struct net_device *netdev,
3134 struct ethtool_channels *channels)
3135{
3136 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3137
Thomas Falcon723ad912018-09-28 18:38:26 -05003138 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3139 channels->max_rx = adapter->max_rx_queues;
3140 channels->max_tx = adapter->max_tx_queues;
3141 } else {
3142 channels->max_rx = IBMVNIC_MAX_QUEUES;
3143 channels->max_tx = IBMVNIC_MAX_QUEUES;
3144 }
3145
John Allenc2dbeb62017-08-02 16:47:17 -05003146 channels->max_other = 0;
3147 channels->max_combined = 0;
3148 channels->rx_count = adapter->req_rx_queues;
3149 channels->tx_count = adapter->req_tx_queues;
3150 channels->other_count = 0;
3151 channels->combined_count = 0;
3152}
3153
John Allenc26eba02017-10-26 16:23:25 -05003154static int ibmvnic_set_channels(struct net_device *netdev,
3155 struct ethtool_channels *channels)
3156{
3157 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05003158 int ret;
John Allenc26eba02017-10-26 16:23:25 -05003159
Thomas Falcon723ad912018-09-28 18:38:26 -05003160 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05003161 adapter->desired.rx_queues = channels->rx_count;
3162 adapter->desired.tx_queues = channels->tx_count;
3163
Thomas Falcon723ad912018-09-28 18:38:26 -05003164 ret = wait_for_reset(adapter);
3165
3166 if (!ret &&
3167 (adapter->req_rx_queues != channels->rx_count ||
3168 adapter->req_tx_queues != channels->tx_count))
3169 netdev_info(netdev,
3170 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3171 channels->rx_count, channels->tx_count,
3172 adapter->req_rx_queues, adapter->req_tx_queues);
3173 return ret;
John Allenc26eba02017-10-26 16:23:25 -05003174}
3175
Thomas Falcon032c5e82015-12-21 11:26:06 -06003176static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3177{
John Allen3d52b592017-08-02 16:44:14 -05003178 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003179 int i;
3180
Thomas Falcon723ad912018-09-28 18:38:26 -05003181 switch (stringset) {
3182 case ETH_SS_STATS:
3183 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3184 i++, data += ETH_GSTRING_LEN)
3185 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3186
3187 for (i = 0; i < adapter->req_tx_queues; i++) {
3188 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3189 data += ETH_GSTRING_LEN;
3190
3191 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3192 data += ETH_GSTRING_LEN;
3193
3194 snprintf(data, ETH_GSTRING_LEN,
3195 "tx%d_dropped_packets", i);
3196 data += ETH_GSTRING_LEN;
3197 }
3198
3199 for (i = 0; i < adapter->req_rx_queues; i++) {
3200 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3201 data += ETH_GSTRING_LEN;
3202
3203 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3204 data += ETH_GSTRING_LEN;
3205
3206 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3207 data += ETH_GSTRING_LEN;
3208 }
3209 break;
3210
3211 case ETH_SS_PRIV_FLAGS:
3212 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3213 strcpy(data + i * ETH_GSTRING_LEN,
3214 ibmvnic_priv_flags[i]);
3215 break;
3216 default:
Thomas Falcon032c5e82015-12-21 11:26:06 -06003217 return;
John Allen3d52b592017-08-02 16:44:14 -05003218 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003219}
3220
3221static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3222{
John Allen3d52b592017-08-02 16:44:14 -05003223 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3224
Thomas Falcon032c5e82015-12-21 11:26:06 -06003225 switch (sset) {
3226 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05003227 return ARRAY_SIZE(ibmvnic_stats) +
3228 adapter->req_tx_queues * NUM_TX_STATS +
3229 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon723ad912018-09-28 18:38:26 -05003230 case ETH_SS_PRIV_FLAGS:
3231 return ARRAY_SIZE(ibmvnic_priv_flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003232 default:
3233 return -EOPNOTSUPP;
3234 }
3235}
3236
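/* Request fresh statistics from the VNIC server via a REQUEST_STATISTICS
 * CRQ, wait (up to 10 seconds) for the response, then copy the global and
 * per-queue counters into the ethtool buffer.
 */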
3237static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3238 struct ethtool_stats *stats, u64 *data)
3239{
3240 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3241 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05003242 int i, j;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003243 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003244
3245 memset(&crq, 0, sizeof(crq));
3246 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3247 crq.request_statistics.cmd = REQUEST_STATISTICS;
3248 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3249 crq.request_statistics.len =
3250 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003251
3252 /* Wait for data to be written */
Thomas Falcon070eca92019-11-25 17:12:53 -06003253 reinit_completion(&adapter->stats_done);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003254 rc = ibmvnic_send_crq(adapter, &crq);
3255 if (rc)
3256 return;
Thomas Falcon476d96c2019-11-25 17:12:55 -06003257 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3258 if (rc)
3259 return;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003260
3261 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
Lijun Pan91dc5d22021-02-11 00:43:22 -06003262 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3263 (adapter, ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05003264
3265 for (j = 0; j < adapter->req_tx_queues; j++) {
3266 data[i] = adapter->tx_stats_buffers[j].packets;
3267 i++;
3268 data[i] = adapter->tx_stats_buffers[j].bytes;
3269 i++;
3270 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3271 i++;
3272 }
3273
3274 for (j = 0; j < adapter->req_rx_queues; j++) {
3275 data[i] = adapter->rx_stats_buffers[j].packets;
3276 i++;
3277 data[i] = adapter->rx_stats_buffers[j].bytes;
3278 i++;
3279 data[i] = adapter->rx_stats_buffers[j].interrupts;
3280 i++;
3281 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003282}
3283
Thomas Falcon723ad912018-09-28 18:38:26 -05003284static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3285{
3286 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3287
3288 return adapter->priv_flags;
3289}
3290
3291static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3292{
3293 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3294 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3295
3296 if (which_maxes)
3297 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3298 else
3299 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3300
3301 return 0;
3302}
Lijun Pan91dc5d22021-02-11 00:43:22 -06003303
Thomas Falcon032c5e82015-12-21 11:26:06 -06003304static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003305 .get_drvinfo = ibmvnic_get_drvinfo,
3306 .get_msglevel = ibmvnic_get_msglevel,
3307 .set_msglevel = ibmvnic_set_msglevel,
3308 .get_link = ibmvnic_get_link,
3309 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05003310 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05003311 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05003312 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003313 .get_strings = ibmvnic_get_strings,
3314 .get_sset_count = ibmvnic_get_sset_count,
3315 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01003316 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon723ad912018-09-28 18:38:26 -05003317 .get_priv_flags = ibmvnic_get_priv_flags,
3318 .set_priv_flags = ibmvnic_set_priv_flags,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003319};
3320
3321/* Routines for managing CRQs/sCRQs */
3322
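/* Reset a single sub-CRQ: release its irq mapping, zero the queue pages
 * and counters, then re-register the queue with the hypervisor.
 */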
Nathan Fontenot57a49432017-05-26 10:31:12 -04003323static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3324 struct ibmvnic_sub_crq_queue *scrq)
3325{
3326 int rc;
3327
Dany Madden9281cf22020-11-25 18:04:26 -06003328 if (!scrq) {
YANG LI862aecb2020-12-30 15:23:14 +08003329 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
Dany Madden9281cf22020-11-25 18:04:26 -06003330 return -EINVAL;
3331 }
3332
Nathan Fontenot57a49432017-05-26 10:31:12 -04003333 if (scrq->irq) {
3334 free_irq(scrq->irq, scrq);
3335 irq_dispose_mapping(scrq->irq);
3336 scrq->irq = 0;
3337 }
3338
Dany Madden9281cf22020-11-25 18:04:26 -06003339 if (scrq->msgs) {
3340 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3341 atomic_set(&scrq->used, 0);
3342 scrq->cur = 0;
Jakub Kicinski55fd59b2020-12-03 15:42:13 -08003343 scrq->ind_buf.index = 0;
Dany Madden9281cf22020-11-25 18:04:26 -06003344 } else {
3345 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3346 return -EINVAL;
3347 }
Nathan Fontenot57a49432017-05-26 10:31:12 -04003348
3349 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3350 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3351 return rc;
3352}
3353
3354static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3355{
3356 int i, rc;
3357
Lijun Pana0faaa22020-11-23 13:35:45 -06003358 if (!adapter->tx_scrq || !adapter->rx_scrq)
3359 return -EINVAL;
3360
Nathan Fontenot57a49432017-05-26 10:31:12 -04003361 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003362 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04003363 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3364 if (rc)
3365 return rc;
3366 }
3367
3368 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003369 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04003370 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3371 if (rc)
3372 return rc;
3373 }
3374
Nathan Fontenot57a49432017-05-26 10:31:12 -04003375 return rc;
3376}
3377
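/* Release one sub-CRQ: optionally free it with the hypervisor
 * (H_FREE_SUB_CRQ), then free its indirect buffer, DMA mapping and
 * queue pages.
 */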
Thomas Falcon032c5e82015-12-21 11:26:06 -06003378static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003379 struct ibmvnic_sub_crq_queue *scrq,
3380 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003381{
3382 struct device *dev = &adapter->vdev->dev;
3383 long rc;
3384
3385 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3386
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003387 if (do_h_free) {
3388 /* Close the sub-crqs */
3389 do {
3390 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3391 adapter->vdev->unit_address,
3392 scrq->crq_num);
3393 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003394
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003395 if (rc) {
3396 netdev_err(adapter->netdev,
3397 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3398 scrq->crq_num, rc);
3399 }
Thomas Falconffa73852017-04-19 13:44:29 -04003400 }
3401
Thomas Falconf019fb62020-11-18 19:12:17 -06003402 dma_free_coherent(dev,
3403 IBMVNIC_IND_ARR_SZ,
3404 scrq->ind_buf.indir_arr,
3405 scrq->ind_buf.indir_dma);
3406
Thomas Falcon032c5e82015-12-21 11:26:06 -06003407 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3408 DMA_BIDIRECTIONAL);
3409 free_pages((unsigned long)scrq->msgs, 2);
3410 kfree(scrq);
3411}
3412
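/* Allocate and register one sub-CRQ: four pages of queue messages, the
 * DMA mapping, the hypervisor registration and the indirect command
 * buffer, unwinding everything on failure.
 */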
3413static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3414 *adapter)
3415{
3416 struct device *dev = &adapter->vdev->dev;
3417 struct ibmvnic_sub_crq_queue *scrq;
3418 int rc;
3419
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003420 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003421 if (!scrq)
3422 return NULL;
3423
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04003424 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003425 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003426 if (!scrq->msgs) {
3427 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3428 goto zero_page_failed;
3429 }
3430
3431 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3432 DMA_BIDIRECTIONAL);
3433 if (dma_mapping_error(dev, scrq->msg_token)) {
3434 dev_warn(dev, "Couldn't map crq queue messages page\n");
3435 goto map_failed;
3436 }
3437
3438 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3439 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3440
3441 if (rc == H_RESOURCE)
3442 rc = ibmvnic_reset_crq(adapter);
3443
3444 if (rc == H_CLOSED) {
3445 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3446 } else if (rc) {
3447 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3448 goto reg_failed;
3449 }
3450
Thomas Falcon032c5e82015-12-21 11:26:06 -06003451 scrq->adapter = adapter;
3452 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falconf019fb62020-11-18 19:12:17 -06003453 scrq->ind_buf.index = 0;
3454
3455 scrq->ind_buf.indir_arr =
3456 dma_alloc_coherent(dev,
3457 IBMVNIC_IND_ARR_SZ,
3458 &scrq->ind_buf.indir_dma,
3459 GFP_KERNEL);
3460
3461 if (!scrq->ind_buf.indir_arr)
3462 goto indir_failed;
3463
Thomas Falcon032c5e82015-12-21 11:26:06 -06003464 spin_lock_init(&scrq->lock);
3465
3466 netdev_dbg(adapter->netdev,
3467 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3468 scrq->crq_num, scrq->hw_irq, scrq->irq);
3469
3470 return scrq;
3471
Thomas Falconf019fb62020-11-18 19:12:17 -06003472indir_failed:
3473 do {
3474 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3475 adapter->vdev->unit_address,
3476 scrq->crq_num);
3477	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003478reg_failed:
3479 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3480 DMA_BIDIRECTIONAL);
3481map_failed:
3482 free_pages((unsigned long)scrq->msgs, 2);
3483zero_page_failed:
3484 kfree(scrq);
3485
3486 return NULL;
3487}
3488
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003489static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003490{
3491 int i;
3492
3493 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003494 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003495 if (!adapter->tx_scrq[i])
3496 continue;
3497
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003498 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3499 i);
Sukadev Bhattiprolu65d64702021-06-23 21:13:12 -07003500 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003501 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003502 free_irq(adapter->tx_scrq[i]->irq,
3503 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003504 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003505 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003506 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003507
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003508 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3509 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003510 }
3511
Nathan Fontenot9501df32017-03-15 23:38:07 -04003512 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003513 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003514 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003515 }
3516
3517 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003518 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003519 if (!adapter->rx_scrq[i])
3520 continue;
3521
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003522 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3523 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003524 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003525 free_irq(adapter->rx_scrq[i]->irq,
3526 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003527 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003528 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003529 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003530
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003531 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3532 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003533 }
3534
Nathan Fontenot9501df32017-03-15 23:38:07 -04003535 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003536 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003537 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003538 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003539}
3540
3541static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3542 struct ibmvnic_sub_crq_queue *scrq)
3543{
3544 struct device *dev = &adapter->vdev->dev;
3545 unsigned long rc;
3546
3547 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3548 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3549 if (rc)
3550 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3551 scrq->hw_irq, rc);
3552 return rc;
3553}
3554
3555static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3556 struct ibmvnic_sub_crq_queue *scrq)
3557{
3558 struct device *dev = &adapter->vdev->dev;
3559 unsigned long rc;
3560
3561 if (scrq->hw_irq > 0x100000000ULL) {
3562 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3563 return 1;
3564 }
3565
Juliet Kim7ed5b312019-09-20 16:11:23 -04003566 if (test_bit(0, &adapter->resetting) &&
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003567 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Juliet Kim284f87d2019-11-20 10:50:03 -05003568 u64 val = (0xff000000) | scrq->hw_irq;
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003569
Juliet Kim284f87d2019-11-20 10:50:03 -05003570 rc = plpar_hcall_norets(H_EOI, val);
Juliet Kim2df5c602019-11-20 10:50:04 -05003571 /* H_EOI would fail with rc = H_FUNCTION when running
3572 * in XIVE mode which is expected, but not an error.
3573 */
Sukadev Bhattiprolu154b3b22021-06-23 21:13:16 -07003574 if (rc && (rc != H_FUNCTION))
Juliet Kim284f87d2019-11-20 10:50:03 -05003575 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3576 val, rc);
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003577 }
Thomas Falconf23e0642018-04-15 18:53:36 -05003578
Thomas Falcon032c5e82015-12-21 11:26:06 -06003579 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3580 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3581 if (rc)
3582 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3583 scrq->hw_irq, rc);
3584 return rc;
3585}
3586
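/* Process tx completions on a sub-CRQ: free or consume the completed
 * skbs, return their buffers to the tx pool, and wake the subqueue once
 * enough descriptors are free again.
 */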
3587static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3588 struct ibmvnic_sub_crq_queue *scrq)
3589{
3590 struct device *dev = &adapter->vdev->dev;
Thomas Falcon06b3e352018-03-16 20:00:28 -05003591 struct ibmvnic_tx_pool *tx_pool;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003592 struct ibmvnic_tx_buff *txbuff;
Thomas Falcon0d973382020-11-18 19:12:19 -06003593 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003594 union sub_crq *next;
3595 int index;
Thomas Falconc62aa372020-11-18 19:12:20 -06003596 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003597
3598restart_loop:
3599 while (pending_scrq(adapter, scrq)) {
3600 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06003601 int num_entries = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06003602 int total_bytes = 0;
3603 int num_packets = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003604
3605 next = ibmvnic_next_scrq(adapter, scrq);
3606 for (i = 0; i < next->tx_comp.num_comps; i++) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003607 index = be32_to_cpu(next->tx_comp.correlators[i]);
Thomas Falcon06b3e352018-03-16 20:00:28 -05003608 if (index & IBMVNIC_TSO_POOL_MASK) {
3609 tx_pool = &adapter->tso_pool[pool];
3610 index &= ~IBMVNIC_TSO_POOL_MASK;
3611 } else {
3612 tx_pool = &adapter->tx_pool[pool];
3613 }
3614
3615 txbuff = &tx_pool->tx_buff[index];
Thomas Falcon0d973382020-11-18 19:12:19 -06003616 num_packets++;
Thomas Falconffc385b2018-02-18 10:08:41 -06003617 num_entries += txbuff->num_entries;
Thomas Falcon0d973382020-11-18 19:12:19 -06003618 if (txbuff->skb) {
3619 total_bytes += txbuff->skb->len;
Lijun Panca09bf72021-04-13 03:33:25 -05003620 if (next->tx_comp.rcs[i]) {
3621 dev_err(dev, "tx error %x\n",
3622 next->tx_comp.rcs[i]);
3623 dev_kfree_skb_irq(txbuff->skb);
3624 } else {
3625 dev_consume_skb_irq(txbuff->skb);
3626 }
Thomas Falcon0d973382020-11-18 19:12:19 -06003627 txbuff->skb = NULL;
3628 } else {
3629 netdev_warn(adapter->netdev,
3630 "TX completion received with NULL socket buffer\n");
3631 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05003632 tx_pool->free_map[tx_pool->producer_index] = index;
3633 tx_pool->producer_index =
3634 (tx_pool->producer_index + 1) %
3635 tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003636 }
3637		/* remove tx_comp scrq */
3638 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003639
Thomas Falcon0d973382020-11-18 19:12:19 -06003640 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3641 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3642
Thomas Falconffc385b2018-02-18 10:08:41 -06003643 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003644 (adapter->req_tx_entries_per_subcrq / 2) &&
3645 __netif_subqueue_stopped(adapter->netdev,
3646 scrq->pool_index)) {
3647 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06003648 netdev_dbg(adapter->netdev, "Started queue %d\n",
3649 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003650 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003651 }
3652
3653 enable_scrq_irq(adapter, scrq);
3654
3655 if (pending_scrq(adapter, scrq)) {
3656 disable_scrq_irq(adapter, scrq);
3657 goto restart_loop;
3658 }
3659
3660 return 0;
3661}
3662
3663static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3664{
3665 struct ibmvnic_sub_crq_queue *scrq = instance;
3666 struct ibmvnic_adapter *adapter = scrq->adapter;
3667
3668 disable_scrq_irq(adapter, scrq);
3669 ibmvnic_complete_tx(adapter, scrq);
3670
3671 return IRQ_HANDLED;
3672}
3673
3674static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3675{
3676 struct ibmvnic_sub_crq_queue *scrq = instance;
3677 struct ibmvnic_adapter *adapter = scrq->adapter;
3678
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06003679 /* When booting a kdump kernel we can hit pending interrupts
3680 * prior to completing driver initialization.
3681 */
3682 if (unlikely(adapter->state != VNIC_OPEN))
3683 return IRQ_NONE;
3684
John Allen3d52b592017-08-02 16:44:14 -05003685 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3686
Thomas Falcon032c5e82015-12-21 11:26:06 -06003687 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3688 disable_scrq_irq(adapter, scrq);
3689 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3690 }
3691
3692 return IRQ_HANDLED;
3693}
3694
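/* Map and request an irq for every tx and rx sub-CRQ, unwinding all
 * previously requested irqs and releasing the sub-CRQs on failure.
 */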
Thomas Falconea22d512016-07-06 15:35:17 -05003695static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3696{
3697 struct device *dev = &adapter->vdev->dev;
3698 struct ibmvnic_sub_crq_queue *scrq;
3699 int i = 0, j = 0;
3700 int rc = 0;
3701
3702 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003703 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3704 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003705 scrq = adapter->tx_scrq[i];
3706 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3707
Michael Ellerman99c17902016-09-10 19:59:05 +10003708 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003709 rc = -EINVAL;
3710 dev_err(dev, "Error mapping irq\n");
3711 goto req_tx_irq_failed;
3712 }
3713
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003714 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3715 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003716 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003717 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003718
3719 if (rc) {
3720 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3721 scrq->irq, rc);
3722 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06003723 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05003724 }
3725 }
3726
3727 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003728 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3729 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003730 scrq = adapter->rx_scrq[i];
3731 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10003732 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003733 rc = -EINVAL;
3734 dev_err(dev, "Error mapping irq\n");
3735 goto req_rx_irq_failed;
3736 }
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003737 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3738 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003739 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003740 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003741 if (rc) {
3742 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3743 scrq->irq, rc);
3744 irq_dispose_mapping(scrq->irq);
3745 goto req_rx_irq_failed;
3746 }
3747 }
3748 return rc;
3749
3750req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003751 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003752 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3753 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003754 }
Thomas Falconea22d512016-07-06 15:35:17 -05003755 i = adapter->req_tx_queues;
3756req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003757 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003758 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
Thomas Falcon27a21452020-07-29 16:36:32 -05003759 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003760 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003761 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05003762 return rc;
3763}
3764
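/* Allocate the full set of tx and rx sub-CRQs. If fewer queues than
 * requested could be registered, scale req_tx_queues/req_rx_queues back
 * toward the driver minimums before distributing the registered queues.
 */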
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003765static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003766{
3767 struct device *dev = &adapter->vdev->dev;
3768 struct ibmvnic_sub_crq_queue **allqueues;
3769 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003770 int total_queues;
3771 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05003772 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003773
Thomas Falcon032c5e82015-12-21 11:26:06 -06003774 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3775
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003776 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003777 if (!allqueues)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003778 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003779
3780 for (i = 0; i < total_queues; i++) {
3781 allqueues[i] = init_sub_crq_queue(adapter);
3782 if (!allqueues[i]) {
3783 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3784 break;
3785 }
3786 registered_queues++;
3787 }
3788
3789 /* Make sure we were able to register the minimum number of queues */
3790 if (registered_queues <
3791 adapter->min_tx_queues + adapter->min_rx_queues) {
3792 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3793 goto tx_failed;
3794 }
3795
3796	/* Distribute the allocation shortfall between rx and tx queues */
3797	for (i = 0; i < total_queues - registered_queues + more; i++) {
3798 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3799 switch (i % 3) {
3800 case 0:
3801 if (adapter->req_rx_queues > adapter->min_rx_queues)
3802 adapter->req_rx_queues--;
3803 else
3804 more++;
3805 break;
3806 case 1:
3807 if (adapter->req_tx_queues > adapter->min_tx_queues)
3808 adapter->req_tx_queues--;
3809 else
3810 more++;
3811 break;
3812 }
3813 }
3814
3815 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003816 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003817 if (!adapter->tx_scrq)
3818 goto tx_failed;
3819
3820 for (i = 0; i < adapter->req_tx_queues; i++) {
3821 adapter->tx_scrq[i] = allqueues[i];
3822 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003823 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003824 }
3825
3826 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003827 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003828 if (!adapter->rx_scrq)
3829 goto rx_failed;
3830
3831 for (i = 0; i < adapter->req_rx_queues; i++) {
3832 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3833 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003834 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003835 }
3836
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003837 kfree(allqueues);
3838 return 0;
3839
3840rx_failed:
3841 kfree(adapter->tx_scrq);
3842 adapter->tx_scrq = NULL;
3843tx_failed:
3844 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003845 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003846 kfree(allqueues);
3847 return -1;
3848}
3849
Lijun Pan09081b92020-09-27 20:13:27 -05003850static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003851{
3852 struct device *dev = &adapter->vdev->dev;
3853 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05003854 int max_entries;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003855
3856 if (!retry) {
3857		/* Sub-CRQ entries are 32 bytes long; a queue spans four pages */
3858 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3859
3860 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3861 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3862 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3863 return;
3864 }
3865
John Allenc26eba02017-10-26 16:23:25 -05003866 if (adapter->desired.mtu)
3867 adapter->req_mtu = adapter->desired.mtu;
3868 else
3869 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003870
John Allenc26eba02017-10-26 16:23:25 -05003871 if (!adapter->desired.tx_entries)
3872 adapter->desired.tx_entries =
3873 adapter->max_tx_entries_per_subcrq;
3874 if (!adapter->desired.rx_entries)
3875 adapter->desired.rx_entries =
3876 adapter->max_rx_add_entries_per_subcrq;
3877
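		/* Clamp the ring sizes so one ring's worth of buffers,
		 * header overhead included, still fits in a single long
		 * term mapped buffer (IBMVNIC_MAX_LTB_SIZE).
		 */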
3878 max_entries = IBMVNIC_MAX_LTB_SIZE /
3879 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3880
3881 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3882 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3883 adapter->desired.tx_entries = max_entries;
3884 }
3885
3886 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3887 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3888 adapter->desired.rx_entries = max_entries;
3889 }
3890
3891 if (adapter->desired.tx_entries)
3892 adapter->req_tx_entries_per_subcrq =
3893 adapter->desired.tx_entries;
3894 else
3895 adapter->req_tx_entries_per_subcrq =
3896 adapter->max_tx_entries_per_subcrq;
3897
3898 if (adapter->desired.rx_entries)
3899 adapter->req_rx_add_entries_per_subcrq =
3900 adapter->desired.rx_entries;
3901 else
3902 adapter->req_rx_add_entries_per_subcrq =
3903 adapter->max_rx_add_entries_per_subcrq;
3904
3905 if (adapter->desired.tx_queues)
3906 adapter->req_tx_queues =
3907 adapter->desired.tx_queues;
3908 else
3909 adapter->req_tx_queues =
3910 adapter->opt_tx_comp_sub_queues;
3911
3912 if (adapter->desired.rx_queues)
3913 adapter->req_rx_queues =
3914 adapter->desired.rx_queues;
3915 else
3916 adapter->req_rx_queues =
3917 adapter->opt_rx_comp_queues;
3918
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003919 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003920 }
3921
Thomas Falcon032c5e82015-12-21 11:26:06 -06003922 memset(&crq, 0, sizeof(crq));
3923 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3924 crq.request_capability.cmd = REQUEST_CAPABILITY;
3925
3926 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003927 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003928 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003929 ibmvnic_send_crq(adapter, &crq);
3930
3931 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003932 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003933 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003934 ibmvnic_send_crq(adapter, &crq);
3935
3936 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003937 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003938 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003939 ibmvnic_send_crq(adapter, &crq);
3940
3941 crq.request_capability.capability =
3942 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3943 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003944 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003945 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003946 ibmvnic_send_crq(adapter, &crq);
3947
3948 crq.request_capability.capability =
3949 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3950 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003951 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003952 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003953 ibmvnic_send_crq(adapter, &crq);
3954
3955 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06003956 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06003957 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003958 ibmvnic_send_crq(adapter, &crq);
3959
3960 if (adapter->netdev->flags & IFF_PROMISC) {
3961 if (adapter->promisc_supported) {
3962 crq.request_capability.capability =
3963 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003964 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06003965 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003966 ibmvnic_send_crq(adapter, &crq);
3967 }
3968 } else {
3969 crq.request_capability.capability =
3970 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003971 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06003972 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003973 ibmvnic_send_crq(adapter, &crq);
3974 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003975}
3976
3977static int pending_scrq(struct ibmvnic_adapter *adapter,
3978 struct ibmvnic_sub_crq_queue *scrq)
3979{
3980 union sub_crq *entry = &scrq->msgs[scrq->cur];
Lijun Pan665ab1e2021-01-29 19:19:04 -06003981 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003982
Lijun Pan665ab1e2021-01-29 19:19:04 -06003983 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3984
3985 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3986 * contents of the SCRQ descriptor
3987 */
3988 dma_rmb();
3989
3990 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003991}
3992
3993static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3994 struct ibmvnic_sub_crq_queue *scrq)
3995{
3996 union sub_crq *entry;
3997 unsigned long flags;
3998
3999 spin_lock_irqsave(&scrq->lock, flags);
4000 entry = &scrq->msgs[scrq->cur];
4001 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4002 if (++scrq->cur == scrq->size)
4003 scrq->cur = 0;
4004 } else {
4005 entry = NULL;
4006 }
4007 spin_unlock_irqrestore(&scrq->lock, flags);
4008
Lijun Pan665ab1e2021-01-29 19:19:04 -06004009 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4010 * contents of the SCRQ descriptor
Thomas Falconb71ec952020-12-01 09:52:10 -06004011 */
4012 dma_rmb();
4013
Thomas Falcon032c5e82015-12-21 11:26:06 -06004014 return entry;
4015}
4016
4017static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4018{
4019 struct ibmvnic_crq_queue *queue = &adapter->crq;
4020 union ibmvnic_crq *crq;
4021
4022 crq = &queue->msgs[queue->cur];
4023 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4024 if (++queue->cur == queue->size)
4025 queue->cur = 0;
4026 } else {
4027 crq = NULL;
4028 }
4029
4030 return crq;
4031}
4032
Thomas Falcon2d14d372018-07-13 12:03:32 -05004033static void print_subcrq_error(struct device *dev, int rc, const char *func)
4034{
4035 switch (rc) {
4036 case H_PARAMETER:
4037 dev_warn_ratelimited(dev,
4038 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4039 func, rc);
4040 break;
4041 case H_CLOSED:
4042 dev_warn_ratelimited(dev,
4043 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4044 func, rc);
4045 break;
4046 default:
4047 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4048 break;
4049 }
4050}
4051
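/* Post a block of pre-built descriptors in one hcall: ioba is the DMA
 * address of the first entry and num_entries the count to submit.
 */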
Thomas Falconad7775d2016-04-01 17:20:34 -05004052static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4053 u64 remote_handle, u64 ioba, u64 num_entries)
4054{
4055 unsigned int ua = adapter->vdev->unit_address;
4056 struct device *dev = &adapter->vdev->dev;
4057 int rc;
4058
4059 /* Make sure the hypervisor sees the complete request */
Lijun Pan1a421562021-02-12 20:36:46 -06004060 dma_wmb();
Thomas Falconad7775d2016-04-01 17:20:34 -05004061 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4062 cpu_to_be64(remote_handle),
4063 ioba, num_entries);
4064
Thomas Falcon2d14d372018-07-13 12:03:32 -05004065 if (rc)
4066 print_subcrq_error(dev, rc, __func__);
Thomas Falconad7775d2016-04-01 17:20:34 -05004067
4068 return rc;
4069}
4070
Thomas Falcon032c5e82015-12-21 11:26:06 -06004071static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4072 union ibmvnic_crq *crq)
4073{
4074 unsigned int ua = adapter->vdev->unit_address;
4075 struct device *dev = &adapter->vdev->dev;
4076 u64 *u64_crq = (u64 *)crq;
4077 int rc;
4078
4079 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004080 (unsigned long)cpu_to_be64(u64_crq[0]),
4081 (unsigned long)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004082
Thomas Falcon51536982018-05-23 13:37:56 -05004083 if (!adapter->crq.active &&
4084 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4085 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4086 return -EINVAL;
4087 }
4088
Thomas Falcon032c5e82015-12-21 11:26:06 -06004089 /* Make sure the hypervisor sees the complete request */
Lijun Pan1a421562021-02-12 20:36:46 -06004090 dma_wmb();
Thomas Falcon032c5e82015-12-21 11:26:06 -06004091
4092 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4093 cpu_to_be64(u64_crq[0]),
4094 cpu_to_be64(u64_crq[1]));
4095
4096 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06004097 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06004098 dev_warn(dev, "CRQ Queue closed\n");
Lijun Panfa68bfa2020-08-19 17:52:24 -05004099			/* do not reset, report the failure, wait for passive init from server */
Nathan Fontenotec95dff2018-02-07 13:00:24 -06004100 }
4101
Thomas Falcon032c5e82015-12-21 11:26:06 -06004102 dev_warn(dev, "Send error (rc=%d)\n", rc);
4103 }
4104
4105 return rc;
4106}
4107
4108static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4109{
Thomas Falcon36a782f2020-08-31 11:59:57 -05004110 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004111 union ibmvnic_crq crq;
Thomas Falcon36a782f2020-08-31 11:59:57 -05004112 int retries = 100;
4113 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004114
4115 memset(&crq, 0, sizeof(crq));
4116 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4117 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4118 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4119
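	/* H_CLOSED most likely means the partner's CRQ is not open yet,
	 * e.g. during a failover; retry for up to five seconds
	 * (100 tries x 50 ms) before giving up.
	 */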
Thomas Falcon36a782f2020-08-31 11:59:57 -05004120 do {
4121 rc = ibmvnic_send_crq(adapter, &crq);
4122 if (rc != H_CLOSED)
4123 break;
4124 retries--;
4125 msleep(50);
4126
4127 } while (retries > 0);
4128
4129 if (rc) {
4130 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4131 return rc;
4132 }
4133
4134 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004135}
4136
Nathan Fontenot37798d02017-11-08 11:23:56 -06004137struct vnic_login_client_data {
4138 u8 type;
4139 __be16 len;
Kees Cook08ea5562018-04-10 15:26:43 -07004140 char name[];
Nathan Fontenot37798d02017-11-08 11:23:56 -06004141} __packed;
4142
4143static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4144{
4145 int len;
4146
4147 /* Calculate the amount of buffer space needed for the
4148 * vnic client data in the login buffer. There are four entries,
4149 * OS name, LPAR name, device name, and a null last entry.
4150 */
4151 len = 4 * sizeof(struct vnic_login_client_data);
4152	len += 6; /* strlen("Linux") + 1 for the terminating NUL */
4153 len += strlen(utsname()->nodename) + 1;
4154 len += strlen(adapter->netdev->name) + 1;
4155
4156 return len;
4157}
4158
4159static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4160 struct vnic_login_client_data *vlcd)
4161{
4162 const char *os_name = "Linux";
4163 int len;
4164
4165 /* Type 1 - LPAR OS */
4166 vlcd->type = 1;
4167 len = strlen(os_name) + 1;
4168 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004169 strscpy(vlcd->name, os_name, len);
Kees Cook08ea5562018-04-10 15:26:43 -07004170 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004171
4172 /* Type 2 - LPAR name */
4173 vlcd->type = 2;
4174 len = strlen(utsname()->nodename) + 1;
4175 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004176 strscpy(vlcd->name, utsname()->nodename, len);
Kees Cook08ea5562018-04-10 15:26:43 -07004177 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004178
4179 /* Type 3 - device name */
4180 vlcd->type = 3;
4181 len = strlen(adapter->netdev->name) + 1;
4182 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004183 strscpy(vlcd->name, adapter->netdev->name, len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004184}
4185
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004186static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004187{
4188 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4189 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004190 struct device *dev = &adapter->vdev->dev;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004191 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004192 dma_addr_t rsp_buffer_token;
4193 dma_addr_t buffer_token;
4194 size_t rsp_buffer_size;
4195 union ibmvnic_crq crq;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004196 int client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004197 size_t buffer_size;
4198 __be64 *tx_list_p;
4199 __be64 *rx_list_p;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004200 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004201 int i;
4202
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004203 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4204 netdev_err(adapter->netdev,
4205 "RX or TX queues are not allocated, device login failed\n");
4206 return -1;
4207 }
4208
Lijun Pana0c8be52020-12-19 15:39:19 -06004209 release_login_buffer(adapter);
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06004210 release_login_rsp_buffer(adapter);
Lijun Pana0c8be52020-12-19 15:39:19 -06004211
Nathan Fontenot37798d02017-11-08 11:23:56 -06004212 client_data_len = vnic_client_data_len(adapter);
4213
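	/* Login buffer layout: fixed header, tx sub-CRQ numbers, rx
	 * sub-CRQ numbers, then the client data (OS, LPAR and device
	 * names) appended at client_data_offset.
	 */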
Thomas Falcon032c5e82015-12-21 11:26:06 -06004214 buffer_size =
4215 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06004216 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4217 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004218
Nathan Fontenot37798d02017-11-08 11:23:56 -06004219 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004220 if (!login_buffer)
4221 goto buf_alloc_failed;
4222
4223 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4224 DMA_TO_DEVICE);
4225 if (dma_mapping_error(dev, buffer_token)) {
4226 dev_err(dev, "Couldn't map login buffer\n");
4227 goto buf_map_failed;
4228 }
4229
John Allen498cd8e2016-04-06 11:49:55 -05004230 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4231 sizeof(u64) * adapter->req_tx_queues +
4232 sizeof(u64) * adapter->req_rx_queues +
4233 sizeof(u64) * adapter->req_rx_queues +
4234 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004235
4236 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4237 if (!login_rsp_buffer)
4238 goto buf_rsp_alloc_failed;
4239
4240 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4241 rsp_buffer_size, DMA_FROM_DEVICE);
4242 if (dma_mapping_error(dev, rsp_buffer_token)) {
4243 dev_err(dev, "Couldn't map login rsp buffer\n");
4244 goto buf_rsp_map_failed;
4245 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04004246
Thomas Falcon032c5e82015-12-21 11:26:06 -06004247 adapter->login_buf = login_buffer;
4248 adapter->login_buf_token = buffer_token;
4249 adapter->login_buf_sz = buffer_size;
4250 adapter->login_rsp_buf = login_rsp_buffer;
4251 adapter->login_rsp_buf_token = rsp_buffer_token;
4252 adapter->login_rsp_buf_sz = rsp_buffer_size;
4253
4254 login_buffer->len = cpu_to_be32(buffer_size);
4255 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4256 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4257 login_buffer->off_txcomp_subcrqs =
4258 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4259 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4260 login_buffer->off_rxcomp_subcrqs =
4261 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4262 sizeof(u64) * adapter->req_tx_queues);
4263 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4264 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4265
4266 tx_list_p = (__be64 *)((char *)login_buffer +
4267 sizeof(struct ibmvnic_login_buffer));
4268 rx_list_p = (__be64 *)((char *)login_buffer +
4269 sizeof(struct ibmvnic_login_buffer) +
4270 sizeof(u64) * adapter->req_tx_queues);
4271
4272 for (i = 0; i < adapter->req_tx_queues; i++) {
4273 if (adapter->tx_scrq[i]) {
Lijun Pan914789a2021-02-11 00:43:21 -06004274 tx_list_p[i] =
4275 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004276 }
4277 }
4278
4279 for (i = 0; i < adapter->req_rx_queues; i++) {
4280 if (adapter->rx_scrq[i]) {
Lijun Pan914789a2021-02-11 00:43:21 -06004281 rx_list_p[i] =
4282 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004283 }
4284 }
4285
Nathan Fontenot37798d02017-11-08 11:23:56 -06004286 /* Insert vNIC login client data */
4287 vlcd = (struct vnic_login_client_data *)
4288 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4289 login_buffer->client_data_offset =
4290 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4291 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4292
4293 vnic_add_client_data(adapter, vlcd);
4294
Thomas Falcon032c5e82015-12-21 11:26:06 -06004295 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4296 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4297 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004298 ((unsigned long *)(adapter->login_buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004299 }
4300
4301 memset(&crq, 0, sizeof(crq));
4302 crq.login.first = IBMVNIC_CRQ_CMD;
4303 crq.login.cmd = LOGIN;
4304 crq.login.ioba = cpu_to_be32(buffer_token);
4305 crq.login.len = cpu_to_be32(buffer_size);
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06004306
4307 adapter->login_pending = true;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004308 rc = ibmvnic_send_crq(adapter, &crq);
4309 if (rc) {
4310 adapter->login_pending = false;
4311 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4312 goto buf_rsp_map_failed;
4313 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004314
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004315 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004316
Thomas Falcon032c5e82015-12-21 11:26:06 -06004317buf_rsp_map_failed:
4318 kfree(login_rsp_buffer);
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004319 adapter->login_rsp_buf = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004320buf_rsp_alloc_failed:
4321 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4322buf_map_failed:
4323 kfree(login_buffer);
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004324 adapter->login_buf = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004325buf_alloc_failed:
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004326 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004327}
4328
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004329static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4330 u32 len, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004331{
4332 union ibmvnic_crq crq;
4333
4334 memset(&crq, 0, sizeof(crq));
4335 crq.request_map.first = IBMVNIC_CRQ_CMD;
4336 crq.request_map.cmd = REQUEST_MAP;
4337 crq.request_map.map_id = map_id;
4338 crq.request_map.ioba = cpu_to_be32(addr);
4339 crq.request_map.len = cpu_to_be32(len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004340 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004341}
4342
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004343static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004344{
4345 union ibmvnic_crq crq;
4346
4347 memset(&crq, 0, sizeof(crq));
4348 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4349 crq.request_unmap.cmd = REQUEST_UNMAP;
4350 crq.request_unmap.map_id = map_id;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004351 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004352}
4353
Lijun Pan69980d02020-09-27 20:13:28 -05004354static void send_query_map(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004355{
4356 union ibmvnic_crq crq;
4357
4358 memset(&crq, 0, sizeof(crq));
4359 crq.query_map.first = IBMVNIC_CRQ_CMD;
4360 crq.query_map.cmd = QUERY_MAP;
4361 ibmvnic_send_crq(adapter, &crq);
4362}
4363
4364/* Send a series of CRQs requesting various capabilities of the VNIC server */
Lijun Pan491099a2020-09-27 20:13:26 -05004365static void send_query_cap(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004366{
4367 union ibmvnic_crq crq;
4368
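	/* running_cap_crqs counts the outstanding QUERY_CAPABILITY
	 * requests; the response handler decrements it and only moves on
	 * to requesting capabilities once it drops back to zero.
	 */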
Thomas Falcon901e0402017-02-15 12:17:59 -06004369 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004370 memset(&crq, 0, sizeof(crq));
4371 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4372 crq.query_capability.cmd = QUERY_CAPABILITY;
4373
4374 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004375 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004376 ibmvnic_send_crq(adapter, &crq);
4377
4378 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004379 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004380 ibmvnic_send_crq(adapter, &crq);
4381
4382 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004383 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004384 ibmvnic_send_crq(adapter, &crq);
4385
4386 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004387 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004388 ibmvnic_send_crq(adapter, &crq);
4389
4390 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004391 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004392 ibmvnic_send_crq(adapter, &crq);
4393
4394 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004395 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004396 ibmvnic_send_crq(adapter, &crq);
4397
4398 crq.query_capability.capability =
4399 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004400 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004401 ibmvnic_send_crq(adapter, &crq);
4402
4403 crq.query_capability.capability =
4404 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004405 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004406 ibmvnic_send_crq(adapter, &crq);
4407
4408 crq.query_capability.capability =
4409 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004410 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004411 ibmvnic_send_crq(adapter, &crq);
4412
4413 crq.query_capability.capability =
4414 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004415 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004416 ibmvnic_send_crq(adapter, &crq);
4417
4418 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06004419 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004420 ibmvnic_send_crq(adapter, &crq);
4421
4422 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06004423 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004424 ibmvnic_send_crq(adapter, &crq);
4425
4426 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06004427 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004428 ibmvnic_send_crq(adapter, &crq);
4429
4430 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06004431 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004432 ibmvnic_send_crq(adapter, &crq);
4433
4434 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06004435 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004436 ibmvnic_send_crq(adapter, &crq);
4437
4438 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06004439 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004440 ibmvnic_send_crq(adapter, &crq);
4441
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004442 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4443 atomic_inc(&adapter->running_cap_crqs);
4444 ibmvnic_send_crq(adapter, &crq);
4445
Thomas Falcon032c5e82015-12-21 11:26:06 -06004446 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004447 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004448 ibmvnic_send_crq(adapter, &crq);
4449
4450 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06004451 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004452 ibmvnic_send_crq(adapter, &crq);
4453
4454 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004455 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004456 ibmvnic_send_crq(adapter, &crq);
4457
4458 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004459 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004460 ibmvnic_send_crq(adapter, &crq);
4461
4462 crq.query_capability.capability =
4463 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06004464 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004465 ibmvnic_send_crq(adapter, &crq);
4466
4467 crq.query_capability.capability =
4468 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004469 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004470 ibmvnic_send_crq(adapter, &crq);
4471
4472 crq.query_capability.capability =
4473 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004474 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004475 ibmvnic_send_crq(adapter, &crq);
4476
4477 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004478 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004479 ibmvnic_send_crq(adapter, &crq);
4480}
4481
Lijun Pan16e811f2020-09-27 20:13:29 -05004482static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4483{
4484 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4485 struct device *dev = &adapter->vdev->dev;
4486 union ibmvnic_crq crq;
4487
4488 adapter->ip_offload_tok =
4489 dma_map_single(dev,
4490 &adapter->ip_offload_buf,
4491 buf_sz,
4492 DMA_FROM_DEVICE);
4493
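	/* Under CMO (cooperative memory overcommit) a mapping failure is
	 * an expected out-of-resources condition, so only complain when
	 * CMO is not active.
	 */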
4494 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4495 if (!firmware_has_feature(FW_FEATURE_CMO))
4496 dev_err(dev, "Couldn't map offload buffer\n");
4497 return;
4498 }
4499
4500 memset(&crq, 0, sizeof(crq));
4501 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4502 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4503 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4504 crq.query_ip_offload.ioba =
4505 cpu_to_be32(adapter->ip_offload_tok);
4506
4507 ibmvnic_send_crq(adapter, &crq);
4508}
4509
Lijun Pan46899bd2020-09-27 20:13:30 -05004510static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4511{
4512 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4513 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4514 struct device *dev = &adapter->vdev->dev;
4515 netdev_features_t old_hw_features = 0;
4516 union ibmvnic_crq crq;
4517
4518 adapter->ip_offload_ctrl_tok =
4519 dma_map_single(dev,
4520 ctrl_buf,
4521 sizeof(adapter->ip_offload_ctrl),
4522 DMA_TO_DEVICE);
4523
4524 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4525 dev_err(dev, "Couldn't map ip offload control buffer\n");
4526 return;
4527 }
4528
4529 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4530 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4531 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4532 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4533 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4534 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4535 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4536 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4537 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4538 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4539
4540 /* large_rx disabled for now, additional features needed */
4541 ctrl_buf->large_rx_ipv4 = 0;
4542 ctrl_buf->large_rx_ipv6 = 0;
4543
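	/* Rebuild hw_features from the server's reported offloads; across
	 * a reset the old mask is kept so features the user turned off
	 * are not silently re-enabled.
	 */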
4544 if (adapter->state != VNIC_PROBING) {
4545 old_hw_features = adapter->netdev->hw_features;
4546 adapter->netdev->hw_features = 0;
4547 }
4548
4549 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4550
4551 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4552 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4553
4554 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4555 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4556
4557 if ((adapter->netdev->features &
4558 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4559 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4560
4561 if (buf->large_tx_ipv4)
4562 adapter->netdev->hw_features |= NETIF_F_TSO;
4563 if (buf->large_tx_ipv6)
4564 adapter->netdev->hw_features |= NETIF_F_TSO6;
4565
4566 if (adapter->state == VNIC_PROBING) {
4567 adapter->netdev->features |= adapter->netdev->hw_features;
4568 } else if (old_hw_features != adapter->netdev->hw_features) {
4569 netdev_features_t tmp = 0;
4570
4571 /* disable features no longer supported */
4572 adapter->netdev->features &= adapter->netdev->hw_features;
4573 /* turn on features now supported if previously enabled */
4574 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4575 adapter->netdev->hw_features;
4576 adapter->netdev->features |=
4577 tmp & adapter->netdev->wanted_features;
4578 }
4579
4580 memset(&crq, 0, sizeof(crq));
4581 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4582 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4583 crq.control_ip_offload.len =
4584 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4585 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4586 ibmvnic_send_crq(adapter, &crq);
4587}
4588
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004589static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4590 struct ibmvnic_adapter *adapter)
4591{
4592 struct device *dev = &adapter->vdev->dev;
4593
4594 if (crq->get_vpd_size_rsp.rc.code) {
4595 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4596 crq->get_vpd_size_rsp.rc.code);
4597 complete(&adapter->fw_done);
4598 return;
4599 }
4600
4601 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4602 complete(&adapter->fw_done);
4603}
4604
4605static void handle_vpd_rsp(union ibmvnic_crq *crq,
4606 struct ibmvnic_adapter *adapter)
4607{
4608 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004609 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004610 u8 fw_level_len = 0;
4611
4612 memset(adapter->fw_version, 0, 32);
4613
4614 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4615 DMA_FROM_DEVICE);
4616
4617 if (crq->get_vpd_rsp.rc.code) {
4618 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4619 crq->get_vpd_rsp.rc.code);
4620 goto complete;
4621 }
4622
4623 /* get the position of the firmware version info
4624 * located after the ASCII 'RM' substring in the buffer
4625 */
4626 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4627 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02004628 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004629 goto complete;
4630 }
4631
4632 /* get length of firmware level ASCII substring */
4633 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4634 fw_level_len = *(substr + 2);
4635 } else {
4636 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4637 goto complete;
4638 }
4639
4640 /* copy firmware version string from vpd into adapter */
4641 if ((substr + 3 + fw_level_len) <
4642 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004643 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004644 } else {
4645 dev_info(dev, "FW substr extrapolated VPD buff\n");
4646 }
4647
4648complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004649 if (adapter->fw_version[0] == '\0')
Lijun Pan0b217d32021-06-11 13:33:53 -05004650 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004651 complete(&adapter->fw_done);
4652}
4653
Thomas Falcon032c5e82015-12-21 11:26:06 -06004654static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4655{
4656 struct device *dev = &adapter->vdev->dev;
4657 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004658 int i;
4659
4660 dma_unmap_single(dev, adapter->ip_offload_tok,
4661 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4662
4663 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4664 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4665 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004666 ((unsigned long *)(buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004667
4668 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4669 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4670 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4671 buf->tcp_ipv4_chksum);
4672 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4673 buf->tcp_ipv6_chksum);
4674 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4675 buf->udp_ipv4_chksum);
4676 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4677 buf->udp_ipv6_chksum);
4678 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4679 buf->large_tx_ipv4);
4680 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4681 buf->large_tx_ipv6);
4682 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4683 buf->large_rx_ipv4);
4684 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4685 buf->large_rx_ipv6);
4686 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4687 buf->max_ipv4_header_size);
4688 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4689 buf->max_ipv6_header_size);
4690 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4691 buf->max_tcp_header_size);
4692 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4693 buf->max_udp_header_size);
4694 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4695 buf->max_large_tx_size);
4696 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4697 buf->max_large_rx_size);
4698 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4699 buf->ipv6_extension_header);
4700 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4701 buf->tcp_pseudosum_req);
4702 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4703 buf->num_ipv6_ext_headers);
4704 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4705 buf->off_ipv6_ext_headers);
4706
Lijun Pan46899bd2020-09-27 20:13:30 -05004707 send_control_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004708}
4709
Thomas Falconc9008d32018-08-06 21:39:59 -05004710static const char *ibmvnic_fw_err_cause(u16 cause)
4711{
4712 switch (cause) {
4713 case ADAPTER_PROBLEM:
4714 return "adapter problem";
4715 case BUS_PROBLEM:
4716 return "bus problem";
4717 case FW_PROBLEM:
4718 return "firmware problem";
4719 case DD_PROBLEM:
4720 return "device driver problem";
4721 case EEH_RECOVERY:
4722 return "EEH recovery";
4723 case FW_UPDATED:
4724 return "firmware updated";
4725 case LOW_MEMORY:
4726 return "low Memory";
4727 default:
4728 return "unknown";
4729 }
4730}
4731
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004732static void handle_error_indication(union ibmvnic_crq *crq,
4733 struct ibmvnic_adapter *adapter)
4734{
4735 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004736 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004737
Thomas Falconc9008d32018-08-06 21:39:59 -05004738 cause = be16_to_cpu(crq->error_indication.error_cause);
4739
4740 dev_warn_ratelimited(dev,
4741 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4742 crq->error_indication.flags
4743 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4744 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004745
Nathan Fontenoted651a12017-05-03 14:04:38 -04004746 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4747 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004748 else
4749 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004750}
4751
Thomas Falconf8136142018-01-29 13:45:05 -06004752static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4753 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004754{
4755 struct net_device *netdev = adapter->netdev;
4756 struct device *dev = &adapter->vdev->dev;
4757 long rc;
4758
4759 rc = crq->change_mac_addr_rsp.rc.code;
4760 if (rc) {
4761 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004762 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004763 }
Lijun Pand9b0e592020-10-20 17:39:19 -05004764 /* crq->change_mac_addr.mac_addr is the requested one
4765 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4766 */
Jakub Kicinskif3956eb2021-10-01 14:32:23 -07004767 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
Lijun Pand9b0e592020-10-20 17:39:19 -05004768 ether_addr_copy(adapter->mac_addr,
4769 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004770out:
4771 complete(&adapter->fw_done);
4772 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004773}
4774
4775static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4776 struct ibmvnic_adapter *adapter)
4777{
4778 struct device *dev = &adapter->vdev->dev;
4779 u64 *req_value;
4780 char *name;
4781
Thomas Falcon901e0402017-02-15 12:17:59 -06004782 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004783 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4784 case REQ_TX_QUEUES:
4785 req_value = &adapter->req_tx_queues;
4786 name = "tx";
4787 break;
4788 case REQ_RX_QUEUES:
4789 req_value = &adapter->req_rx_queues;
4790 name = "rx";
4791 break;
4792 case REQ_RX_ADD_QUEUES:
4793 req_value = &adapter->req_rx_add_queues;
4794 name = "rx_add";
4795 break;
4796 case REQ_TX_ENTRIES_PER_SUBCRQ:
4797 req_value = &adapter->req_tx_entries_per_subcrq;
4798 name = "tx_entries_per_subcrq";
4799 break;
4800 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4801 req_value = &adapter->req_rx_add_entries_per_subcrq;
4802 name = "rx_add_entries_per_subcrq";
4803 break;
4804 case REQ_MTU:
4805 req_value = &adapter->req_mtu;
4806 name = "mtu";
4807 break;
4808 case PROMISC_REQUESTED:
4809 req_value = &adapter->promisc;
4810 name = "promisc";
4811 break;
4812 default:
4813 dev_err(dev, "Got invalid cap request rsp %d\n",
4814 crq->request_capability.capability);
4815 return;
4816 }
4817
4818 switch (crq->request_capability_rsp.rc.code) {
4819 case SUCCESS:
4820 break;
4821 case PARTIALSUCCESS:
4822 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4823 *req_value,
Lijun Pan914789a2021-02-11 00:43:21 -06004824 (long)be64_to_cpu(crq->request_capability_rsp.number),
4825 name);
John Allene7913802018-01-18 16:27:12 -06004826
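		/* Adopt the server's counter-offer, except for the MTU,
		 * which is reverted to the fallback value, then resend the
		 * whole set of capability requests.
		 */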
4827 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4828 REQ_MTU) {
4829 pr_err("mtu of %llu is not supported. Reverting.\n",
4830 *req_value);
4831 *req_value = adapter->fallback.mtu;
4832 } else {
4833 *req_value =
4834 be64_to_cpu(crq->request_capability_rsp.number);
4835 }
4836
Lijun Pan09081b92020-09-27 20:13:27 -05004837 send_request_cap(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004838 return;
4839 default:
4840 dev_err(dev, "Error %d in request cap rsp\n",
4841 crq->request_capability_rsp.rc.code);
4842 return;
4843 }
4844
4845 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004846 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon249168a2017-02-15 12:18:00 -06004847 adapter->wait_capability = false;
Lijun Pan16e811f2020-09-27 20:13:29 -05004848 send_query_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004849 }
4850}
4851
4852static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4853 struct ibmvnic_adapter *adapter)
4854{
4855 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004856 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004857 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4858 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004859 u64 *tx_handle_array;
4860 u64 *rx_handle_array;
4861 int num_tx_pools;
4862 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004863 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004864 int i;
4865
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06004866 /* CHECK: Test/set of login_pending does not need to be atomic
4867 * because only ibmvnic_tasklet tests/clears this.
4868 */
4869 if (!adapter->login_pending) {
4870 netdev_warn(netdev, "Ignoring unexpected login response\n");
4871 return 0;
4872 }
4873 adapter->login_pending = false;
4874
Thomas Falcon032c5e82015-12-21 11:26:06 -06004875 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004876 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004877 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004878 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004879
John Allen498cd8e2016-04-06 11:49:55 -05004880 /* If the number of queues requested can't be allocated by the
4881 * server, the login response will return with code 1. We will need
4882 * to resend the login buffer with fewer queues requested.
4883 */
4884 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004885 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004886 complete(&adapter->init_done);
4887 return 0;
4888 }
4889
Sukadev Bhattiprolud437f5a2021-09-07 22:07:03 -07004890 if (adapter->failover_pending) {
4891 adapter->init_done_rc = -EAGAIN;
4892 netdev_dbg(netdev, "Failover pending, ignoring login response\n");
4893 complete(&adapter->init_done);
4894 /* login response buffer will be released on reset */
4895 return 0;
4896 }
4897
John Allenc26eba02017-10-26 16:23:25 -05004898 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4899
Thomas Falcon032c5e82015-12-21 11:26:06 -06004900 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4901 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4902 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004903 ((unsigned long *)(adapter->login_rsp_buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004904 }
4905
4906 /* Sanity checks */
4907 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4908 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4909 adapter->req_rx_add_queues !=
4910 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4911 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
Dany Madden31d6b402020-11-25 18:04:24 -06004912 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004913 return -EIO;
4914 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004915 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4916 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4917 /* variable buffer sizes are not supported, so just read the
4918 * first entry.
4919 */
4920 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004921
4922 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4923 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4924
4925 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4926 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4927 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4928 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4929
4930 for (i = 0; i < num_tx_pools; i++)
4931 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4932
4933 for (i = 0; i < num_rx_pools; i++)
4934 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4935
Thomas Falcon507ebe62020-08-21 13:39:01 -05004936 adapter->num_active_tx_scrqs = num_tx_pools;
4937 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004938 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004939 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004940 complete(&adapter->init_done);
4941
Thomas Falcon032c5e82015-12-21 11:26:06 -06004942 return 0;
4943}
4944
Thomas Falcon032c5e82015-12-21 11:26:06 -06004945static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4946 struct ibmvnic_adapter *adapter)
4947{
4948 struct device *dev = &adapter->vdev->dev;
4949 long rc;
4950
4951 rc = crq->request_unmap_rsp.rc.code;
4952 if (rc)
4953 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4954}
4955
4956static void handle_query_map_rsp(union ibmvnic_crq *crq,
4957 struct ibmvnic_adapter *adapter)
4958{
4959 struct net_device *netdev = adapter->netdev;
4960 struct device *dev = &adapter->vdev->dev;
4961 long rc;
4962
4963 rc = crq->query_map_rsp.rc.code;
4964 if (rc) {
4965 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4966 return;
4967 }
Sukadev Bhattiprolu0f2bf312021-09-14 20:52:52 -07004968 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
4969 crq->query_map_rsp.page_size,
4970 __be32_to_cpu(crq->query_map_rsp.tot_pages),
4971 __be32_to_cpu(crq->query_map_rsp.free_pages));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004972}
4973
4974static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4975 struct ibmvnic_adapter *adapter)
4976{
4977 struct net_device *netdev = adapter->netdev;
4978 struct device *dev = &adapter->vdev->dev;
4979 long rc;
4980
Thomas Falcon901e0402017-02-15 12:17:59 -06004981 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004982 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004983 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004984 rc = crq->query_capability.rc.code;
4985 if (rc) {
4986 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4987 goto out;
4988 }
4989
4990 switch (be16_to_cpu(crq->query_capability.capability)) {
4991 case MIN_TX_QUEUES:
4992 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004993 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004994 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4995 adapter->min_tx_queues);
4996 break;
4997 case MIN_RX_QUEUES:
4998 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004999 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005000 netdev_dbg(netdev, "min_rx_queues = %lld\n",
5001 adapter->min_rx_queues);
5002 break;
5003 case MIN_RX_ADD_QUEUES:
5004 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005005 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005006 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5007 adapter->min_rx_add_queues);
5008 break;
5009 case MAX_TX_QUEUES:
5010 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005011 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005012 netdev_dbg(netdev, "max_tx_queues = %lld\n",
5013 adapter->max_tx_queues);
5014 break;
5015 case MAX_RX_QUEUES:
5016 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005017 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005018 netdev_dbg(netdev, "max_rx_queues = %lld\n",
5019 adapter->max_rx_queues);
5020 break;
5021 case MAX_RX_ADD_QUEUES:
5022 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06005023 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005024 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5025 adapter->max_rx_add_queues);
5026 break;
5027 case MIN_TX_ENTRIES_PER_SUBCRQ:
5028 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005029 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005030 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5031 adapter->min_tx_entries_per_subcrq);
5032 break;
5033 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5034 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005035 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005036 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5037 adapter->min_rx_add_entries_per_subcrq);
5038 break;
5039 case MAX_TX_ENTRIES_PER_SUBCRQ:
5040 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06005041 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005042 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5043 adapter->max_tx_entries_per_subcrq);
5044 break;
5045 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5046 adapter->max_rx_add_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
			be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}

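/* Send a QUERY_PHYS_PARMS command and wait synchronously (under fw_lock)
 * for the response, which completes adapter->fw_done.  Returns 0 on
 * success, the send/wait error if the exchange fails, or -EIO if the
 * server reported an error in the response.
 */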
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

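/* Cache the link speed and duplex from a QUERY_PHYS_PARMS response,
 * mapping the IBMVNIC_* wire encodings onto the generic ethtool SPEED_*
 * and DUPLEX_* values.
 */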
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

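/* Top-level CRQ dispatcher, invoked from the tasklet for each message
 * pulled off the queue.  Transport events (partner init, failover,
 * migration) are handled inline; command responses are fanned out to
 * the handle_*_rsp() helpers.
 */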
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (e.g. during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				if (!adapter->init_done_rc)
					adapter->init_done_rc = -EAGAIN;
			}

			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
			crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
			crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
			crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

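/* CRQ interrupt handler.  All processing is deferred to the tasklet;
 * hard-IRQ context only schedules it.
 */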
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

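/* Bottom half for CRQ processing: drain every valid message off the
 * queue under queue->lock and hand it to ibmvnic_handle_crq().  The
 * loop stays in the tasklet while capability responses are still
 * outstanding (see wait_capability below).
 */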
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in the tasklet until all
		 * capability responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capability CRQs were sent in this tasklet, the following
	 * tasklet run must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

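/* Ask the hypervisor to re-enable our CRQ, retrying for as long as it
 * reports the operation busy or in progress.
 */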
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

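/* Close and re-register the CRQ with the hypervisor, reusing the
 * already-mapped page.  Used after transport events such as a partition
 * migration, or when a stale registration from a kexec'd kernel is
 * still in place.
 */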
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

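/* Release the CRQ: free the IRQ, kill the tasklet, close the queue with
 * the hypervisor, then unmap and free the backing page.
 */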
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

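/* Allocate and register the CRQ: DMA-map a zeroed page for the message
 * ring, register it via H_REG_CRQ (falling back to ibmvnic_reset_crq()
 * if a stale registration holds the resource), then wire up the tasklet
 * and interrupt handler.
 */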
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

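/* Run the CRQ-level init handshake (CRQ_INIT followed by the version and
 * capability negotiation driven from ibmvnic_handle_crq()) and then set
 * up the sub-CRQs.  On a reset, existing sub-CRQs are reused unless the
 * negotiated queue counts changed, in which case they are reallocated.
 */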
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset)
		reinit_completion(&adapter->init_done);

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

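/* Probe: allocate the net_device and adapter state, bring up the CRQ,
 * and attempt the initial handshake with the VNIC server.  A handshake
 * failure is tolerated here, on the assumption that the partner is not
 * ready yet; the device is then registered in VNIC_DOWN state and waits
 * for a passive init from the server.
 */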
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() on the
	 * assumption that the partner is not ready and the CRQ is not
	 * active. When the partner becomes ready, we will do the passive
	 * init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

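/* Remove: move the adapter to VNIC_REMOVING under both state_lock and
 * rwi_lock so no new resets can be scheduled, flush any reset work that
 * is already queued, then tear down the netdev, pools, sub-CRQs and CRQ.
 */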
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

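/* sysfs "failover" attribute: writing "1" asks the hypervisor to fail
 * the session over via H_VIOCTL(H_SESSION_ERR_DETECTED), then schedules
 * a FAILOVER reset as the last resort either way.
 *
 * Example from userspace (hypothetical unit address shown):
 *   echo 1 > /sys/devices/vio/30000005/failover
 */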
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc)
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}

static DEVICE_ATTR_WO(failover);

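/* Report the IOMMU entitlement this device needs: one page for the CRQ,
 * the statistics buffer, four pages per sub-CRQ, and the
 * long-term-mapped buffers of every active RX pool.
 */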
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

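/* PM resume hook: if the interface was open, kick the tasklet to pick up
 * any CRQ messages that arrived while the partition was suspended.
 */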
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);