// SPDX-License-Identifier: GPL-2.0-or-later
/* IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but
 * are used by the driver to notify the server that a packet is
 * ready for transmission or that a buffer has been added to receive a
 * packet. Subsequently, sCRQs are used by the server to notify the
 * driver that a packet transmission has been completed or that a packet
 * has been received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This entails that large, contiguous DMA-mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

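/* Register a sub-CRQ with the hypervisor via the H_REG_SUB_CRQ hcall.
 * On success the queue number and interrupt source assigned by the
 * hypervisor are returned through @number and @irq.
 */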
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

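/**
 * alloc_long_term_buff() - Allocate and map a long term buffer (LTB)
 * @adapter: ibmvnic adapter
 * @ltb: LTB descriptor to fill in
 * @size: size of the buffer in bytes
 *
 * Allocate a DMA-coherent buffer, assign it the adapter's next map id,
 * then ask the VNIC server to map it via send_request_map() and wait for
 * the firmware completion. On any failure the buffer is freed again.
 *
 * Return: 0 on success, non-zero otherwise.
 */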
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -1;
		goto out;
	}
	rc = 0;
out:
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		ltb->buff = NULL;
	}
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

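/**
 * free_long_term_buff() - Unmap and free a long term buffer (LTB)
 * @adapter: ibmvnic adapter
 * @ltb: LTB descriptor to release
 *
 * Ask the VNIC server to unmap the buffer, unless the current reset
 * reason is one for which VIOS drops the mapping on its own (failover,
 * mobility, timeout), then free the DMA-coherent memory and clear the
 * descriptor.
 */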
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	ltb->map_id = 0;
}

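/**
 * reset_long_term_buff() - Zero out and re-map an existing LTB
 * @adapter: ibmvnic adapter
 * @ltb: LTB descriptor to reset
 *
 * Clear the buffer contents and re-send the map request for the same
 * map id. If firmware rejects the remap, fall back to freeing the buffer
 * and allocating a fresh one of the same size.
 *
 * Return: 0 on success, non-zero otherwise.
 */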
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

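/**
 * replenish_rx_pool() - Refill an rx buffer pool
 * @adapter: ibmvnic adapter
 * @pool: rx pool to replenish
 *
 * Allocate an skb for every free slot in @pool, back each one with a
 * slice of the pool's long term buffer and hand the descriptors to the
 * VNIC server in batches via send_subcrq_indirect(). If the hcall fails,
 * the buffers still queued in the indirect array are unwound; if the
 * queue is closed or a failover is pending, the pools are deactivated
 * and the carrier is turned off until the reset runs.
 */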
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_skb_alloc() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];
		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[index].data = dst;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

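/**
 * init_rx_pools() - Allocate one rx pool per active rx sub-CRQ
 * @netdev: net device backed by this adapter
 *
 * Allocate the rx pool array and, for each pool, its free map, rx_buff
 * tracking array and a long term buffer large enough for
 * req_rx_add_entries_per_subcrq buffers of the current rx buffer size.
 * num_active_rx_pools is set before the per-pool allocations so that a
 * partial failure can be cleaned up by release_rx_pools().
 *
 * Return: 0 on success, -1 on allocation failure.
 */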
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;		/* # of buffers in one pool */
	u64 buff_size;
	int i, j;

	num_pools = adapter->num_active_rx_scrqs;
	pool_size = adapter->req_rx_add_entries_per_subcrq;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 pool_size * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

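/**
 * init_tx_pools() - Allocate the tx and TSO pools for every tx sub-CRQ
 * @netdev: net device backed by this adapter
 *
 * For each tx queue create one regular pool sized for the requested
 * number of tx entries (buffer size = MTU + VLAN header, cache aligned)
 * and one pool of fixed-size TSO buffers. ->tx_pool and ->tso_pool are
 * kept either both allocated or both NULL so release_tx_pools() only
 * needs to check one of them, and num_active_tx_pools is set before the
 * per-queue allocations so partial failures can be unwound.
 *
 * Return: 0 on success, non-zero otherwise.
 */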
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;		/* # of buffers in pool */
	u64 buff_size;
	int i, rc;

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -1;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;
	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

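/**
 * ibmvnic_login() - Log the client in with the VNIC server
 * @netdev: net device backed by this adapter
 *
 * Send a login request and wait for the response, retrying a bounded
 * number of times when the attempt times out, is aborted, or returns
 * partial success. A partial success means the server could not honor
 * the requested capabilities, so the sub-CRQs are released, the
 * capabilities are re-queried and the sub-CRQs are set up again before
 * the next attempt. On success the MAC address is (re)programmed.
 *
 * Return: 0 on success, negative value otherwise.
 */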
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

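/**
 * set_link_state() - Set the adapter's logical link state
 * @adapter: ibmvnic adapter
 * @link_state: logical link state to set (e.g. IBMVNIC_LOGICAL_LNK_UP)
 *
 * Send a LOGICAL_LINK_STATE CRQ command to the VNIC server and wait for
 * the response, re-sending after a short delay for as long as the server
 * reports partial success.
 *
 * Return: 0 on success, non-zero otherwise.
 */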
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

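/**
 * ibmvnic_get_vpd() - Retrieve Vital Product Data from the VNIC server
 * @adapter: ibmvnic adapter
 *
 * Query the VPD size, (re)allocate and DMA-map a buffer of that size,
 * then request the VPD contents. Both steps are CRQ commands that
 * complete asynchronously, so each one waits on the fw_done completion
 * while holding fw_lock.
 *
 * Return: 0 on success, non-zero otherwise.
 */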
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

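/**
 * __ibmvnic_open() - Bring the interface up
 * @netdev: net device backed by this adapter
 *
 * Replenish the rx pools, enable NAPI and the rx/tx sub-CRQ interrupts,
 * set the logical link state to up and start the tx queues. If the
 * adapter was previously closed (rather than freshly initialized), the
 * queue irqs are re-enabled and NAPI is scheduled once on each rx queue.
 */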
Nathan Fontenoted651a12017-05-03 14:04:38 -04001254static int __ibmvnic_open(struct net_device *netdev)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001255{
1256 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001257 enum vnic_state prev_state = adapter->state;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001258 int i, rc;
1259
Nathan Fontenot90c80142017-05-03 14:04:32 -04001260 adapter->state = VNIC_OPENING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001261 replenish_pools(adapter);
John Allend944c3d62017-05-26 10:30:13 -04001262 ibmvnic_napi_enable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001263
Thomas Falcon032c5e82015-12-21 11:26:06 -06001264 /* We're ready to receive frames, enable the sub-crq interrupts and
1265 * set the logical link state to up
1266 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001267 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001268 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001269 if (prev_state == VNIC_CLOSED)
1270 enable_irq(adapter->rx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001271 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001272 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001273
Nathan Fontenoted651a12017-05-03 14:04:38 -04001274 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001275 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001276 if (prev_state == VNIC_CLOSED)
1277 enable_irq(adapter->tx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001278 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
Thomas Falcon0d973382020-11-18 19:12:19 -06001279 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
Nathan Fontenoted651a12017-05-03 14:04:38 -04001280 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001281
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001282 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001283 if (rc) {
Lijun Pan0775ebc2021-04-14 02:46:14 -05001284 ibmvnic_napi_disable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001285 release_resources(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001286 return rc;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001287 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001288
Nathan Fontenoted651a12017-05-03 14:04:38 -04001289 netif_tx_start_all_queues(netdev);
1290
Dany Madden2ca220f2021-06-23 21:13:11 -07001291 if (prev_state == VNIC_CLOSED) {
1292 for (i = 0; i < adapter->req_rx_queues; i++)
1293 napi_schedule(&adapter->napi[i]);
1294 }
1295
Nathan Fontenoted651a12017-05-03 14:04:38 -04001296 adapter->state = VNIC_OPEN;
1297 return rc;
1298}
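
/* Ordering sketch (informational, mirrors the code above): opening the
 * device replenishes the rx pools, enables NAPI, unmasks the per-queue
 * sub-CRQ interrupts, raises the logical link state and only then wakes the
 * TX queues:
 *
 *	replenish_pools(adapter);
 *	ibmvnic_napi_enable(adapter);
 *	...enable rx/tx sub-CRQ interrupts...
 *	set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
 *	netif_tx_start_all_queues(netdev);
 *
 * When reopening from VNIC_CLOSED (e.g. after a reset), NAPI is scheduled
 * immediately, presumably so any frames that arrived while the queues were
 * quiesced are processed promptly.
 */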
1299
1300static int ibmvnic_open(struct net_device *netdev)
1301{
1302 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
John Allen69d08dc2018-01-18 16:27:58 -06001303 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001304
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001305 ASSERT_RTNL();
1306
1307 /* If device failover is pending or we are about to reset, just set
1308 * device state and return. Device operation will be handled by reset
1309 * routine.
1310 *
1311 * It should be safe to overwrite the adapter->state here. Since
1312 * we hold the rtnl, either the reset has not actually started or
1313 * the rtnl got dropped during the set_link_state() in do_reset().
1314 * In the former case, no one else is changing the state (again we
1315 * have the rtnl) and in the latter case, do_reset() will detect and
1316 * honor our setting below.
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001317 */
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001318 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
Lijun Pan0666ef72021-04-12 02:41:28 -05001319 netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1320 adapter_state_to_string(adapter->state),
1321 adapter->failover_pending);
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001322 adapter->state = VNIC_OPEN;
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001323 rc = 0;
1324 goto out;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001325 }
1326
Nathan Fontenoted651a12017-05-03 14:04:38 -04001327 if (adapter->state != VNIC_CLOSED) {
1328 rc = ibmvnic_login(netdev);
Juliet Kima5681e22018-11-19 15:59:22 -06001329 if (rc)
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001330 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001331
1332 rc = init_resources(adapter);
1333 if (rc) {
1334 netdev_err(netdev, "failed to initialize resources\n");
1335 release_resources(adapter);
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001336 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001337 }
1338 }
1339
1340 rc = __ibmvnic_open(netdev);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001341
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001342out:
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001343 /* If open failed and there is a pending failover or in-progress reset,
1344 * set device state and return. Device operation will be handled by
1345 * reset routine. See also comments above regarding rtnl.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001346 */
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08001347 if (rc &&
1348 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001349 adapter->state = VNIC_OPEN;
1350 rc = 0;
1351 }
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001352 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001353}
1354
Thomas Falcond0869c02018-02-13 18:23:43 -06001355static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1356{
1357 struct ibmvnic_rx_pool *rx_pool;
Thomas Falcon637f81d2018-02-26 18:10:57 -06001358 struct ibmvnic_rx_buff *rx_buff;
Thomas Falcond0869c02018-02-13 18:23:43 -06001359 u64 rx_entries;
1360 int rx_scrqs;
1361 int i, j;
1362
1363 if (!adapter->rx_pool)
1364 return;
1365
Thomas Falcon660e3092018-04-20 14:25:32 -05001366 rx_scrqs = adapter->num_active_rx_pools;
Thomas Falcond0869c02018-02-13 18:23:43 -06001367 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1368
1369 /* Free any remaining skbs in the rx buffer pools */
1370 for (i = 0; i < rx_scrqs; i++) {
1371 rx_pool = &adapter->rx_pool[i];
Thomas Falcon637f81d2018-02-26 18:10:57 -06001372 if (!rx_pool || !rx_pool->rx_buff)
Thomas Falcond0869c02018-02-13 18:23:43 -06001373 continue;
1374
1375 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1376 for (j = 0; j < rx_entries; j++) {
Thomas Falcon637f81d2018-02-26 18:10:57 -06001377 rx_buff = &rx_pool->rx_buff[j];
1378 if (rx_buff && rx_buff->skb) {
1379 dev_kfree_skb_any(rx_buff->skb);
1380 rx_buff->skb = NULL;
Thomas Falcond0869c02018-02-13 18:23:43 -06001381 }
1382 }
1383 }
1384}
1385
Thomas Falcone9e1e972018-03-16 20:00:30 -05001386static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1387 struct ibmvnic_tx_pool *tx_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001388{
Thomas Falcon637f81d2018-02-26 18:10:57 -06001389 struct ibmvnic_tx_buff *tx_buff;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001390 u64 tx_entries;
Thomas Falcone9e1e972018-03-16 20:00:30 -05001391 int i;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001392
Dan Carpenter050e85c2018-03-23 14:36:15 +03001393 if (!tx_pool || !tx_pool->tx_buff)
Thomas Falcone9e1e972018-03-16 20:00:30 -05001394 return;
1395
1396 tx_entries = tx_pool->num_buffers;
1397
1398 for (i = 0; i < tx_entries; i++) {
1399 tx_buff = &tx_pool->tx_buff[i];
1400 if (tx_buff && tx_buff->skb) {
1401 dev_kfree_skb_any(tx_buff->skb);
1402 tx_buff->skb = NULL;
1403 }
1404 }
1405}
1406
1407static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1408{
1409 int tx_scrqs;
1410 int i;
1411
1412 if (!adapter->tx_pool || !adapter->tso_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001413 return;
1414
Thomas Falcon660e3092018-04-20 14:25:32 -05001415 tx_scrqs = adapter->num_active_tx_pools;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001416
1417 /* Free any remaining skbs in the tx buffer pools */
1418 for (i = 0; i < tx_scrqs; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001419 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
Thomas Falcone9e1e972018-03-16 20:00:30 -05001420 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1421 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001422 }
1423}
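
/* Note: both the regular tx_pool and the tso_pool of each queue are cleaned,
 * since a TX buffer's skb may live in either pool depending on whether the
 * packet was GSO (see IBMVNIC_TSO_POOL_MASK in the transmit path below).
 */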
1424
John Allen6095e592018-03-30 13:44:21 -05001425static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
John Allenea5509f2017-03-17 17:13:43 -05001426{
John Allen6095e592018-03-30 13:44:21 -05001427 struct net_device *netdev = adapter->netdev;
John Allenea5509f2017-03-17 17:13:43 -05001428 int i;
1429
Nathan Fontenot46293b92017-05-03 14:05:02 -04001430 if (adapter->tx_scrq) {
1431 for (i = 0; i < adapter->req_tx_queues; i++)
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001432 if (adapter->tx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001433 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001434 "Disabling tx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001435 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001436 disable_irq(adapter->tx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001437 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001438 }
1439
Nathan Fontenot46293b92017-05-03 14:05:02 -04001440 if (adapter->rx_scrq) {
1441 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001442 if (adapter->rx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001443 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001444 "Disabling rx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001445 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001446 disable_irq(adapter->rx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001447 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001448 }
1449 }
John Allen6095e592018-03-30 13:44:21 -05001450}
1451
1452static void ibmvnic_cleanup(struct net_device *netdev)
1453{
1454 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1455
1456 /* ensure that transmissions are stopped if called by do_reset */
Juliet Kim7ed5b312019-09-20 16:11:23 -04001457 if (test_bit(0, &adapter->resetting))
John Allen6095e592018-03-30 13:44:21 -05001458 netif_tx_disable(netdev);
1459 else
1460 netif_tx_stop_all_queues(netdev);
1461
1462 ibmvnic_napi_disable(adapter);
1463 ibmvnic_disable_irqs(adapter);
1464
Thomas Falcond0869c02018-02-13 18:23:43 -06001465 clean_rx_pools(adapter);
Thomas Falcon10f76212017-05-26 10:30:31 -04001466 clean_tx_pools(adapter);
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001467}
1468
1469static int __ibmvnic_close(struct net_device *netdev)
1470{
1471 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1472 int rc = 0;
1473
1474 adapter->state = VNIC_CLOSING;
1475 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
Nathan Fontenot90c80142017-05-03 14:04:32 -04001476 adapter->state = VNIC_CLOSED;
Sukadev Bhattiprolud4083d32021-02-10 17:41:43 -08001477 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001478}
1479
Nathan Fontenoted651a12017-05-03 14:04:38 -04001480static int ibmvnic_close(struct net_device *netdev)
1481{
1482 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1483 int rc;
1484
Lijun Pan0666ef72021-04-12 02:41:28 -05001485 netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
1486 adapter_state_to_string(adapter->state),
1487 adapter->failover_pending,
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08001488 adapter->force_reset_recovery);
1489
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001490 /* If device failover is pending, just set device state and return.
1491 * Device operation will be handled by reset routine.
1492 */
1493 if (adapter->failover_pending) {
1494 adapter->state = VNIC_CLOSED;
1495 return 0;
1496 }
1497
Nathan Fontenoted651a12017-05-03 14:04:38 -04001498 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001499 ibmvnic_cleanup(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001500
1501 return rc;
1502}
1503
Thomas Falconad7775d2016-04-01 17:20:34 -05001504/**
1505 * build_hdr_data - creates L2/L3/L4 header data buffer
Lee Jones80708602021-01-15 20:09:03 +00001506 * @hdr_field: bitfield determining needed headers
1507 * @skb: socket buffer
1508 * @hdr_len: array of header lengths
1509 * @hdr_data: buffer to write the header to
Thomas Falconad7775d2016-04-01 17:20:34 -05001510 *
1511 * Reads hdr_field to determine which headers are needed by firmware.
1512 * Builds a buffer containing these headers. Saves individual header
1513 * lengths and total buffer length to be used to build descriptors.
1514 */
1515static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1516 int *hdr_len, u8 *hdr_data)
1517{
1518 int len = 0;
1519 u8 *hdr;
1520
Thomas Falconda75e3b2018-03-12 11:51:02 -05001521 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1522 hdr_len[0] = sizeof(struct vlan_ethhdr);
1523 else
1524 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001525
1526 if (skb->protocol == htons(ETH_P_IP)) {
1527 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1528 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1529 hdr_len[2] = tcp_hdrlen(skb);
1530 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1531 hdr_len[2] = sizeof(struct udphdr);
1532 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1533 hdr_len[1] = sizeof(struct ipv6hdr);
1534 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1535 hdr_len[2] = tcp_hdrlen(skb);
1536 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1537 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001538 } else if (skb->protocol == htons(ETH_P_ARP)) {
1539 hdr_len[1] = arp_hdr_len(skb->dev);
1540 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001541 }
1542
1543 memset(hdr_data, 0, 120);
1544 if ((hdr_field >> 6) & 1) {
1545 hdr = skb_mac_header(skb);
1546 memcpy(hdr_data, hdr, hdr_len[0]);
1547 len += hdr_len[0];
1548 }
1549
1550 if ((hdr_field >> 5) & 1) {
1551 hdr = skb_network_header(skb);
1552 memcpy(hdr_data + len, hdr, hdr_len[1]);
1553 len += hdr_len[1];
1554 }
1555
1556 if ((hdr_field >> 4) & 1) {
1557 hdr = skb_transport_header(skb);
1558 memcpy(hdr_data + len, hdr, hdr_len[2]);
1559 len += hdr_len[2];
1560 }
1561 return len;
1562}
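
/* Illustrative sketch (not part of the driver): in hdr_field, bit 6 requests
 * the L2 header, bit 5 the L3 header and bit 4 the L4 header, matching the
 * tests above. A hypothetical caller asking for all three might do:
 *
 *	int hdr_len[3];
 *	u8 hdr_data[140] = {0};
 *	u8 hdr_field = BIT(6) | BIT(5) | BIT(4);
 *	int tot_len = build_hdr_data(hdr_field, skb, hdr_len, hdr_data);
 *
 * tot_len is the number of header bytes copied into hdr_data and hdr_len[]
 * holds the individual L2/L3/L4 lengths used later when building the header
 * descriptors.
 */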
1563
1564/**
1565 * create_hdr_descs - create header and header extension descriptors
Lee Jones80708602021-01-15 20:09:03 +00001566 * @hdr_field: bitfield determining needed headers
1567 * @hdr_data: buffer containing header data
1568 * @len: length of data buffer
1569 * @hdr_len: array of individual header lengths
1570 * @scrq_arr: descriptor array
Thomas Falconad7775d2016-04-01 17:20:34 -05001571 *
1572 * Creates header and, if needed, header extension descriptors and
1573 * places them in a descriptor array, scrq_arr
1574 */
1575
Thomas Falcon2de09682017-10-16 10:02:11 -05001576static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1577 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001578{
1579 union sub_crq hdr_desc;
1580 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001581 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001582 u8 *data, *cur;
1583 int tmp;
1584
1585 while (tmp_len > 0) {
1586 cur = hdr_data + len - tmp_len;
1587
1588 memset(&hdr_desc, 0, sizeof(hdr_desc));
1589 if (cur != hdr_data) {
1590 data = hdr_desc.hdr_ext.data;
1591 tmp = tmp_len > 29 ? 29 : tmp_len;
1592 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1593 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1594 hdr_desc.hdr_ext.len = tmp;
1595 } else {
1596 data = hdr_desc.hdr.data;
1597 tmp = tmp_len > 24 ? 24 : tmp_len;
1598 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1599 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1600 hdr_desc.hdr.len = tmp;
1601 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1602 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1603 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1604 hdr_desc.hdr.flag = hdr_field << 1;
1605 }
1606 memcpy(data, cur, tmp);
1607 tmp_len -= tmp;
1608 *scrq_arr = hdr_desc;
1609 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001610 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001611 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001612
1613 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001614}
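
/* Illustrative sketch (not part of the driver): the first descriptor carries
 * the length metadata plus up to 24 bytes of header data and each extension
 * descriptor up to 29 more bytes, so for a non-zero total header length the
 * descriptor count could be estimated as:
 *
 *	static int hdr_desc_count(int len)
 *	{
 *		if (len <= 24)
 *			return 1;
 *		return 1 + DIV_ROUND_UP(len - 24, 29);
 *	}
 *
 * which mirrors the loop above peeling 24 bytes into the IBMVNIC_HDR_DESC
 * entry and 29-byte chunks into IBMVNIC_HDR_EXT_DESC entries.
 */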
1615
1616/**
1617 * build_hdr_descs_arr - build a header descriptor array
Lijun Pan73214a62021-06-11 10:43:39 -05001618 * @skb: tx socket buffer
1619 * @indir_arr: indirect array
Lee Jones80708602021-01-15 20:09:03 +00001620 * @num_entries: number of descriptors to be sent
1621 * @hdr_field: bit field determining which headers will be sent
Thomas Falconad7775d2016-04-01 17:20:34 -05001622 *
1623 * This function will build a TX descriptor array with applicable
1624 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1625 */
1626
Thomas Falconc62aa372020-11-18 19:12:20 -06001627static void build_hdr_descs_arr(struct sk_buff *skb,
1628 union sub_crq *indir_arr,
Thomas Falconad7775d2016-04-01 17:20:34 -05001629 int *num_entries, u8 hdr_field)
1630{
1631 int hdr_len[3] = {0, 0, 0};
Thomas Falconc62aa372020-11-18 19:12:20 -06001632 u8 hdr_data[140] = {0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001633 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001634
Thomas Falconc62aa372020-11-18 19:12:20 -06001635 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1636 hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001637 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconc62aa372020-11-18 19:12:20 -06001638 indir_arr + 1);
Thomas Falconad7775d2016-04-01 17:20:34 -05001639}
1640
Thomas Falcon1f247a62018-03-12 11:51:04 -05001641static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1642 struct net_device *netdev)
1643{
1644 /* For some backing devices, mishandling of small packets
1645 * can result in a loss of connection or TX stall. Device
1646 * architects recommend that no packet should be smaller
1647 * than the minimum MTU value provided to the driver, so
1648 * pad any packets to that length
1649 */
1650 if (skb->len < netdev->min_mtu)
1651 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001652
1653 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001654}
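
/* Usage note: skb_put_padto() zero-pads the skb up to netdev->min_mtu and,
 * on allocation failure, frees the skb and returns a negative errno; the
 * transmit path below therefore treats any non-zero return as a drop.
 */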
1655
Thomas Falcon0d973382020-11-18 19:12:19 -06001656static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1657 struct ibmvnic_sub_crq_queue *tx_scrq)
1658{
1659 struct ibmvnic_ind_xmit_queue *ind_bufp;
1660 struct ibmvnic_tx_buff *tx_buff;
1661 struct ibmvnic_tx_pool *tx_pool;
1662 union sub_crq tx_scrq_entry;
1663 int queue_num;
1664 int entries;
1665 int index;
1666 int i;
1667
1668 ind_bufp = &tx_scrq->ind_buf;
1669 entries = (u64)ind_bufp->index;
1670 queue_num = tx_scrq->pool_index;
1671
1672 for (i = entries - 1; i >= 0; --i) {
1673 tx_scrq_entry = ind_bufp->indir_arr[i];
1674 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1675 continue;
1676 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1677 if (index & IBMVNIC_TSO_POOL_MASK) {
1678 tx_pool = &adapter->tso_pool[queue_num];
1679 index &= ~IBMVNIC_TSO_POOL_MASK;
1680 } else {
1681 tx_pool = &adapter->tx_pool[queue_num];
1682 }
1683 tx_pool->free_map[tx_pool->consumer_index] = index;
1684 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1685 tx_pool->num_buffers - 1 :
1686 tx_pool->consumer_index - 1;
1687 tx_buff = &tx_pool->tx_buff[index];
1688 adapter->netdev->stats.tx_packets--;
1689 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1690 adapter->tx_stats_buffers[queue_num].packets--;
1691 adapter->tx_stats_buffers[queue_num].bytes -=
1692 tx_buff->skb->len;
1693 dev_kfree_skb_any(tx_buff->skb);
1694 tx_buff->skb = NULL;
1695 adapter->netdev->stats.tx_dropped++;
1696 }
1697 ind_bufp->index = 0;
1698 if (atomic_sub_return(entries, &tx_scrq->used) <=
1699 (adapter->req_tx_entries_per_subcrq / 2) &&
Sukadev Bhattiprolu65d64702021-06-23 21:13:12 -07001700 __netif_subqueue_stopped(adapter->netdev, queue_num) &&
1701 !test_bit(0, &adapter->resetting)) {
Thomas Falcon0d973382020-11-18 19:12:19 -06001702 netif_wake_subqueue(adapter->netdev, queue_num);
1703 netdev_dbg(adapter->netdev, "Started queue %d\n",
1704 queue_num);
1705 }
1706}
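
/* Note: this path unwinds descriptors that were staged in the indirect
 * buffer but never accepted by firmware -- the free_map entry and the pool's
 * consumer_index are rolled back, the skb is dropped, and the per-queue and
 * netdev counters that were incremented when the frames were queued are
 * decremented again.
 */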
1707
1708static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1709 struct ibmvnic_sub_crq_queue *tx_scrq)
1710{
1711 struct ibmvnic_ind_xmit_queue *ind_bufp;
1712 u64 dma_addr;
1713 u64 entries;
1714 u64 handle;
1715 int rc;
1716
1717 ind_bufp = &tx_scrq->ind_buf;
1718 dma_addr = (u64)ind_bufp->indir_dma;
1719 entries = (u64)ind_bufp->index;
1720 handle = tx_scrq->handle;
1721
1722 if (!entries)
1723 return 0;
1724 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1725 if (rc)
1726 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1727 else
1728 ind_bufp->index = 0;
1729 return 0;
1730}
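
/* Illustrative sketch (not part of the driver): staged descriptors are
 * handed to firmware in a single hypervisor call, roughly:
 *
 *	if (ind_bufp->index) {
 *		rc = send_subcrq_indirect(adapter, tx_scrq->handle,
 *					  ind_bufp->indir_dma,
 *					  ind_bufp->index);
 *		if (rc)
 *			ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
 *		else
 *			ind_bufp->index = 0;
 *	}
 *
 * Batching up to IBMVNIC_MAX_IND_DESCS entries per call amortizes the
 * hypervisor call cost across several skbs when the stack signals
 * netdev_xmit_more().
 */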
1731
YueHaibing94b2bb22018-09-18 14:35:47 +08001732static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001733{
1734 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1735 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001736 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001737 struct device *dev = &adapter->vdev->dev;
Thomas Falcon0d973382020-11-18 19:12:19 -06001738 struct ibmvnic_ind_xmit_queue *ind_bufp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001739 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001740 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001741 struct ibmvnic_tx_pool *tx_pool;
1742 unsigned int tx_send_failed = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001743 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001744 unsigned int tx_map_failed = 0;
Thomas Falconc62aa372020-11-18 19:12:20 -06001745 union sub_crq indir_arr[16];
Thomas Falcon032c5e82015-12-21 11:26:06 -06001746 unsigned int tx_dropped = 0;
1747 unsigned int tx_packets = 0;
1748 unsigned int tx_bytes = 0;
1749 dma_addr_t data_dma_addr;
1750 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001751 unsigned long lpar_rc;
1752 union sub_crq tx_crq;
1753 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001754 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001755 unsigned char *dst;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001756 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001757 u8 proto = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001758
1759 tx_scrq = adapter->tx_scrq[queue_num];
1760 txq = netdev_get_tx_queue(netdev, queue_num);
1761 ind_bufp = &tx_scrq->ind_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001762
Juliet Kim7ed5b312019-09-20 16:11:23 -04001763 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001764 if (!netif_subqueue_stopped(netdev, skb))
1765 netif_stop_subqueue(netdev, queue_num);
1766 dev_kfree_skb_any(skb);
1767
Thomas Falcon032c5e82015-12-21 11:26:06 -06001768 tx_send_failed++;
1769 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001770 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001771 goto out;
1772 }
1773
Thomas Falcon7083a452018-03-12 21:05:26 -05001774 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001775 tx_dropped++;
1776 tx_send_failed++;
1777 ret = NETDEV_TX_OK;
Thomas Falcon0d973382020-11-18 19:12:19 -06001778 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon1f247a62018-03-12 11:51:04 -05001779 goto out;
1780 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001781 if (skb_is_gso(skb))
1782 tx_pool = &adapter->tso_pool[queue_num];
1783 else
1784 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001785
Thomas Falcon032c5e82015-12-21 11:26:06 -06001786 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001787
Thomas Falcon86b61a52018-03-16 20:00:29 -05001788 if (index == IBMVNIC_INVALID_MAP) {
1789 dev_kfree_skb_any(skb);
1790 tx_send_failed++;
1791 tx_dropped++;
Sukadev Bhattiprolubb553622021-07-20 19:34:39 -07001792 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon86b61a52018-03-16 20:00:29 -05001793 ret = NETDEV_TX_OK;
1794 goto out;
1795 }
1796
1797 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1798
Thomas Falcon06b3e352018-03-16 20:00:28 -05001799 offset = index * tx_pool->buf_size;
1800 dst = tx_pool->long_term_buff.buff + offset;
1801 memset(dst, 0, tx_pool->buf_size);
1802 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001803
Thomas Falcon15482052017-10-17 12:36:54 -05001804 if (skb_shinfo(skb)->nr_frags) {
1805 int cur, i;
1806
1807 /* Copy the head */
1808 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1809 cur = skb_headlen(skb);
1810
1811 /* Copy the frags */
1812 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1813 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1814
Christophe JAILLETc3105f82021-04-04 10:54:37 +02001815 memcpy(dst + cur, skb_frag_address(frag),
1816 skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001817 cur += skb_frag_size(frag);
1818 }
1819 } else {
1820 skb_copy_from_linear_data(skb, dst, skb->len);
1821 }
1822
Lijun Pan42557da2021-02-12 20:48:40 -06001823	/* ensure writes to long_term_buff *dst are visible before the VIOS accesses it */
1824 dma_wmb();
1825
Thomas Falcon032c5e82015-12-21 11:26:06 -06001826 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001827 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001828
1829 tx_buff = &tx_pool->tx_buff[index];
1830 tx_buff->skb = skb;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001831 tx_buff->index = index;
1832 tx_buff->pool_index = queue_num;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001833
1834 memset(&tx_crq, 0, sizeof(tx_crq));
1835 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1836 tx_crq.v1.type = IBMVNIC_TX_DESC;
1837 tx_crq.v1.n_crq_elem = 1;
1838 tx_crq.v1.n_sge = 1;
1839 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001840
Thomas Falconfdb06102017-10-17 12:36:55 -05001841 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001842 tx_crq.v1.correlator =
1843 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001844 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001845 tx_crq.v1.correlator = cpu_to_be32(index);
1846 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001847 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1848 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1849
Michał Mirosławe84b4792018-11-07 17:50:52 +01001850 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001851 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1852 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1853 }
1854
1855 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06001856 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1857 proto = ip_hdr(skb)->protocol;
1858 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1859 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1860 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001861 }
1862
Thomas Falcona0dca102018-01-18 19:29:48 -06001863 if (proto == IPPROTO_TCP)
1864 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1865 else if (proto == IPPROTO_UDP)
1866 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1867
Thomas Falconad7775d2016-04-01 17:20:34 -05001868 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001869 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05001870 hdrs += 2;
1871 }
Thomas Falconfdb06102017-10-17 12:36:55 -05001872 if (skb_is_gso(skb)) {
1873 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1874 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1875 hdrs += 2;
1876 }
Thomas Falcon0d973382020-11-18 19:12:19 -06001877
1878 if ((*hdrs >> 7) & 1)
Thomas Falconc62aa372020-11-18 19:12:20 -06001879 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
Thomas Falcon0d973382020-11-18 19:12:19 -06001880
1881 tx_crq.v1.n_crq_elem = num_entries;
1882 tx_buff->num_entries = num_entries;
1883	/* flush buffer if the current entry cannot fit */
1884 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
1885 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1886 if (lpar_rc != H_SUCCESS)
1887 goto tx_flush_err;
Thomas Falconad7775d2016-04-01 17:20:34 -05001888 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001889
Thomas Falconc62aa372020-11-18 19:12:20 -06001890 indir_arr[0] = tx_crq;
1891 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
Thomas Falcon0d973382020-11-18 19:12:19 -06001892 num_entries * sizeof(struct ibmvnic_generic_scrq));
1893 ind_bufp->index += num_entries;
1894 if (__netdev_tx_sent_queue(txq, skb->len,
1895 netdev_xmit_more() &&
1896 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
1897 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1898 if (lpar_rc != H_SUCCESS)
1899 goto tx_err;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001900 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001901
Thomas Falconffc385b2018-02-18 10:08:41 -06001902 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04001903 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06001904 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001905 netif_stop_subqueue(netdev, queue_num);
1906 }
1907
Thomas Falcon032c5e82015-12-21 11:26:06 -06001908 tx_packets++;
1909 tx_bytes += skb->len;
1910 txq->trans_start = jiffies;
1911 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001912 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001913
Thomas Falcon0d973382020-11-18 19:12:19 -06001914tx_flush_err:
1915 dev_kfree_skb_any(skb);
1916 tx_buff->skb = NULL;
1917 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1918 tx_pool->num_buffers - 1 :
1919 tx_pool->consumer_index - 1;
1920 tx_dropped++;
1921tx_err:
1922 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1923 dev_err_ratelimited(dev, "tx: send failed\n");
1924
1925 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1926 /* Disable TX and report carrier off if queue is closed
1927 * or pending failover.
1928 * Firmware guarantees that a signal will be sent to the
1929 * driver, triggering a reset or some other action.
1930 */
1931 netif_tx_stop_all_queues(netdev);
1932 netif_carrier_off(netdev);
1933 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001934out:
1935 netdev->stats.tx_dropped += tx_dropped;
1936 netdev->stats.tx_bytes += tx_bytes;
1937 netdev->stats.tx_packets += tx_packets;
1938 adapter->tx_send_failed += tx_send_failed;
1939 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05001940 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1941 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1942 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001943
1944 return ret;
1945}
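
/* TX path summary (informational): the skb is copied into a slot of the
 * long-term-mapped buffer (regular or TSO pool), a V1 TX descriptor is
 * built, optional L2/L3/L4 header descriptors are appended, and everything
 * is staged in the per-queue indirect buffer. The buffer is flushed to
 * firmware either when it cannot hold the next entry or when
 * __netdev_tx_sent_queue()/netdev_xmit_more() indicate no further packets
 * are pending.
 */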
1946
1947static void ibmvnic_set_multi(struct net_device *netdev)
1948{
1949 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1950 struct netdev_hw_addr *ha;
1951 union ibmvnic_crq crq;
1952
1953 memset(&crq, 0, sizeof(crq));
1954 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1955 crq.request_capability.cmd = REQUEST_CAPABILITY;
1956
1957 if (netdev->flags & IFF_PROMISC) {
1958 if (!adapter->promisc_supported)
1959 return;
1960 } else {
1961 if (netdev->flags & IFF_ALLMULTI) {
1962 /* Accept all multicast */
1963 memset(&crq, 0, sizeof(crq));
1964 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1965 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1966 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1967 ibmvnic_send_crq(adapter, &crq);
1968 } else if (netdev_mc_empty(netdev)) {
1969 /* Reject all multicast */
1970 memset(&crq, 0, sizeof(crq));
1971 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1972 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1973 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1974 ibmvnic_send_crq(adapter, &crq);
1975 } else {
1976 /* Accept one or more multicast(s) */
1977 netdev_for_each_mc_addr(ha, netdev) {
1978 memset(&crq, 0, sizeof(crq));
1979 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1980 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1981 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1982 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1983 ha->addr);
1984 ibmvnic_send_crq(adapter, &crq);
1985 }
1986 }
1987 }
1988}
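
/* Note: multicast filtering is offloaded to the VNIC server through
 * MULTICAST_CTRL CRQs -- IBMVNIC_ENABLE_ALL/IBMVNIC_DISABLE_ALL toggle
 * all-multicast mode and individual addresses are registered one
 * IBMVNIC_ENABLE_MC request at a time from the netdev's multicast list.
 */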
1989
Thomas Falcon62740e92019-05-09 23:13:43 -05001990static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001991{
1992 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001993 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001994 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001995
Thomas Falcon62740e92019-05-09 23:13:43 -05001996 if (!is_valid_ether_addr(dev_addr)) {
1997 rc = -EADDRNOTAVAIL;
1998 goto err;
1999 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002000
2001 memset(&crq, 0, sizeof(crq));
2002 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2003 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05002004 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06002005
Thomas Falconff25dcb2019-11-25 17:12:56 -06002006 mutex_lock(&adapter->fw_lock);
2007 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06002008 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06002009
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002010 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05002011 if (rc) {
2012 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06002013 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002014 goto err;
2015 }
2016
Thomas Falcon476d96c2019-11-25 17:12:55 -06002017 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002018 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06002019 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05002020 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06002021 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002022 goto err;
2023 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06002024 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05002025 return 0;
2026err:
2027 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
2028 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002029}
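
/* Note: netdev->dev_addr itself is updated in the CRQ response handler
 * (handle_change_mac_rsp), so success is reported only after firmware has
 * acknowledged CHANGE_MAC_ADDR; on failure the cached adapter->mac_addr is
 * resynced from netdev->dev_addr.
 */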
2030
John Allenc26eba02017-10-26 16:23:25 -05002031static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2032{
2033 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2034 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06002035 int rc;
John Allenc26eba02017-10-26 16:23:25 -05002036
Thomas Falcon62740e92019-05-09 23:13:43 -05002037 rc = 0;
Lijun Pan8fc36722020-10-27 17:04:56 -05002038 if (!is_valid_ether_addr(addr->sa_data))
2039 return -EADDRNOTAVAIL;
2040
Jiri Wiesner67eb2112021-03-04 17:18:28 +01002041 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2042 if (adapter->state != VNIC_PROBED)
Thomas Falcon62740e92019-05-09 23:13:43 -05002043 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
John Allenc26eba02017-10-26 16:23:25 -05002044
Thomas Falconf8136142018-01-29 13:45:05 -06002045 return rc;
John Allenc26eba02017-10-26 16:23:25 -05002046}
2047
Lijun Pancaee7bf2021-04-12 02:41:27 -05002048static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2049{
2050 switch (reason) {
2051 case VNIC_RESET_FAILOVER:
2052 return "FAILOVER";
2053 case VNIC_RESET_MOBILITY:
2054 return "MOBILITY";
2055 case VNIC_RESET_FATAL:
2056 return "FATAL";
2057 case VNIC_RESET_NON_FATAL:
2058 return "NON_FATAL";
2059 case VNIC_RESET_TIMEOUT:
2060 return "TIMEOUT";
2061 case VNIC_RESET_CHANGE_PARAM:
2062 return "CHANGE_PARAM";
Lijun Pan822ebc22021-06-11 10:35:37 -05002063 case VNIC_RESET_PASSIVE_INIT:
2064 return "PASSIVE_INIT";
Lijun Pancaee7bf2021-04-12 02:41:27 -05002065 }
Michal Suchanek07b5dc12021-05-20 08:50:34 +02002066 return "UNKNOWN";
Lijun Pancaee7bf2021-04-12 02:41:27 -05002067}
2068
Lee Jones80708602021-01-15 20:09:03 +00002069/*
Nathan Fontenoted651a12017-05-03 14:04:38 -04002070 * do_reset returns zero if we are able to keep processing reset events, or
2071 * non-zero if we hit a fatal error and must halt.
2072 */
2073static int do_reset(struct ibmvnic_adapter *adapter,
2074 struct ibmvnic_rwi *rwi, u32 reset_state)
2075{
John Allen896d8692018-01-18 16:26:31 -06002076 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002077 u64 old_num_rx_slots, old_num_tx_slots;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002078 struct net_device *netdev = adapter->netdev;
Lijun Pand3a6abc2021-04-14 02:46:15 -05002079 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002080
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002081 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002082 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2083 adapter_state_to_string(adapter->state),
2084 adapter->failover_pending,
2085 reset_reason_to_string(rwi->reset_reason),
2086 adapter_state_to_string(reset_state));
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002087
Lijun Pan3f5ec372021-01-06 15:35:14 -06002088 adapter->reset_reason = rwi->reset_reason;
2089 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2090 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2091 rtnl_lock();
2092
Lijun Panbab08be2021-02-11 00:43:19 -06002093 /* Now that we have the rtnl lock, clear any pending failover.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002094 * This will ensure ibmvnic_open() has either completed or will
2095 * block until failover is complete.
2096 */
2097 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2098 adapter->failover_pending = false;
Juliet Kimb27507b2019-09-20 16:11:22 -04002099
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002100 /* read the state and check (again) after getting rtnl */
2101 reset_state = adapter->state;
2102
2103 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2104 rc = -EBUSY;
2105 goto out;
2106 }
2107
Nathan Fontenoted651a12017-05-03 14:04:38 -04002108 netif_carrier_off(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002109
John Allen896d8692018-01-18 16:26:31 -06002110 old_num_rx_queues = adapter->req_rx_queues;
2111 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002112 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2113 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06002114
Nathan Fontenot30f79622018-04-06 18:37:06 -05002115 ibmvnic_cleanup(netdev);
2116
Thomas Falcon1f946082019-06-07 16:03:53 -05002117 if (reset_state == VNIC_OPEN &&
2118 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05002119 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Lijun Pan3f5ec372021-01-06 15:35:14 -06002120 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2121 rc = __ibmvnic_close(netdev);
2122 if (rc)
2123 goto out;
2124 } else {
2125 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002126
Lijun Pan3f5ec372021-01-06 15:35:14 -06002127 /* Release the RTNL lock before link state change and
2128 * re-acquire after the link state change to allow
2129 * linkwatch_event to grab the RTNL lock and run during
2130 * a reset.
2131 */
2132 rtnl_unlock();
2133 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2134 rtnl_lock();
2135 if (rc)
2136 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04002137
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002138 if (adapter->state == VNIC_OPEN) {
2139 /* When we dropped rtnl, ibmvnic_open() got
2140 * it and noticed that we are resetting and
2141 * set the adapter state to OPEN. Update our
2142 * new "target" state, and resume the reset
2143 * from VNIC_CLOSING state.
2144 */
2145 netdev_dbg(netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002146 "Open changed state from %s, updating.\n",
2147 adapter_state_to_string(reset_state));
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002148 reset_state = VNIC_OPEN;
2149 adapter->state = VNIC_CLOSING;
2150 }
2151
Lijun Pan3f5ec372021-01-06 15:35:14 -06002152 if (adapter->state != VNIC_CLOSING) {
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002153 /* If someone else changed the adapter state
2154 * when we dropped the rtnl, fail the reset
2155 */
Lijun Pan3f5ec372021-01-06 15:35:14 -06002156 rc = -1;
2157 goto out;
2158 }
Lijun Pan3f5ec372021-01-06 15:35:14 -06002159 adapter->state = VNIC_CLOSED;
Juliet Kimb27507b2019-09-20 16:11:22 -04002160 }
Lijun Pan3f5ec372021-01-06 15:35:14 -06002161 }
Juliet Kimb27507b2019-09-20 16:11:22 -04002162
Lijun Pan3f5ec372021-01-06 15:35:14 -06002163 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2164 release_resources(adapter);
2165 release_sub_crqs(adapter, 1);
2166 release_crq_queue(adapter);
John Allenc26eba02017-10-26 16:23:25 -05002167 }
2168
John Allen8cb31cf2017-05-26 10:30:37 -04002169 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2170 /* remove the closed state so when we call open it appears
2171 * we are coming from the probed state.
2172 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002173 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04002174
Lijun Pan3f5ec372021-01-06 15:35:14 -06002175 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2176 rc = init_crq_queue(adapter);
2177 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002178 rc = ibmvnic_reenable_crq_queue(adapter);
2179 release_sub_crqs(adapter, 1);
2180 } else {
2181 rc = ibmvnic_reset_crq(adapter);
Dany Madden8b40eb732020-06-18 15:24:13 -04002182 if (rc == H_CLOSED || rc == H_SUCCESS) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002183 rc = vio_enable_interrupts(adapter->vdev);
Dany Madden8b40eb732020-06-18 15:24:13 -04002184 if (rc)
2185 netdev_err(adapter->netdev,
2186 "Reset failed to enable interrupts. rc=%d\n",
2187 rc);
2188 }
Nathan Fontenot30f79622018-04-06 18:37:06 -05002189 }
2190
2191 if (rc) {
2192 netdev_err(adapter->netdev,
Dany Madden8b40eb732020-06-18 15:24:13 -04002193 "Reset couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002194 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05002195 }
2196
Lijun Pan635e4422020-08-19 17:52:26 -05002197 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04002198 if (rc) {
2199 rc = IBMVNIC_INIT_FAILED;
2200 goto out;
2201 }
John Allen8cb31cf2017-05-26 10:30:37 -04002202
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002203 /* If the adapter was in PROBE or DOWN state prior to the reset,
John Allen8cb31cf2017-05-26 10:30:37 -04002204 * exit here.
2205 */
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002206 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002207 rc = 0;
2208 goto out;
2209 }
John Allen8cb31cf2017-05-26 10:30:37 -04002210
2211 rc = ibmvnic_login(netdev);
Lijun Panf78afaa2021-02-11 00:43:20 -06002212 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002213 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04002214
Lijun Pan3f5ec372021-01-06 15:35:14 -06002215 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2216 rc = init_resources(adapter);
2217 if (rc)
2218 goto out;
2219 } else if (adapter->req_rx_queues != old_num_rx_queues ||
Juliet Kimb27507b2019-09-20 16:11:22 -04002220 adapter->req_tx_queues != old_num_tx_queues ||
2221 adapter->req_rx_add_entries_per_subcrq !=
2222 old_num_rx_slots ||
2223 adapter->req_tx_entries_per_subcrq !=
Mingming Cao9f134572020-08-25 13:26:41 -04002224 old_num_tx_slots ||
2225 !adapter->rx_pool ||
2226 !adapter->tso_pool ||
2227 !adapter->tx_pool) {
John Allen896d8692018-01-18 16:26:31 -06002228 release_rx_pools(adapter);
2229 release_tx_pools(adapter);
Juliet Kima5681e22018-11-19 15:59:22 -06002230 release_napi(adapter);
2231 release_vpd_data(adapter);
2232
2233 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002234 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002235 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002236
John Allenc26eba02017-10-26 16:23:25 -05002237 } else {
2238 rc = reset_tx_pools(adapter);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002239 if (rc) {
Mingming Cao9f134572020-08-25 13:26:41 -04002240 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
Lijun Pan91dc5d22021-02-11 00:43:22 -06002241 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002242 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002243 }
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002244
John Allenc26eba02017-10-26 16:23:25 -05002245 rc = reset_rx_pools(adapter);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002246 if (rc) {
Mingming Cao9f134572020-08-25 13:26:41 -04002247 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
Lijun Pan91dc5d22021-02-11 00:43:22 -06002248 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002249 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002250 }
John Allenc26eba02017-10-26 16:23:25 -05002251 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002252 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002253 }
John Allene676d812018-03-14 10:41:29 -05002254 adapter->state = VNIC_CLOSED;
2255
Juliet Kimb27507b2019-09-20 16:11:22 -04002256 if (reset_state == VNIC_CLOSED) {
2257 rc = 0;
2258 goto out;
2259 }
John Allene676d812018-03-14 10:41:29 -05002260
Nathan Fontenoted651a12017-05-03 14:04:38 -04002261 rc = __ibmvnic_open(netdev);
2262 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002263 rc = IBMVNIC_OPEN_FAILED;
2264 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002265 }
2266
Thomas Falconbe32a242019-06-07 16:03:54 -05002267 /* refresh device's multicast list */
2268 ibmvnic_set_multi(netdev);
2269
Lijun Pan98025bc2020-11-20 16:40:12 -06002270 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
Lijun Pan6be46662020-12-14 15:19:29 -06002271 adapter->reset_reason == VNIC_RESET_MOBILITY)
2272 __netdev_notify_peers(netdev);
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002273
Juliet Kimb27507b2019-09-20 16:11:22 -04002274 rc = 0;
2275
2276out:
Dany Madden0cb4bc62020-11-25 18:04:27 -06002277 /* restore the adapter state if reset failed */
2278 if (rc)
2279 adapter->state = reset_state;
Lijun Pan3f5ec372021-01-06 15:35:14 -06002280 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2281 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
2282 rtnl_unlock();
Juliet Kimb27507b2019-09-20 16:11:22 -04002283
Lijun Pan0666ef72021-04-12 02:41:28 -05002284 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2285 adapter_state_to_string(adapter->state),
2286 adapter->failover_pending, rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002287 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002288}
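
/* Reset flow summary (informational): do_reset() quiesces the device,
 * optionally closes it and releases CRQ/sub-CRQ resources depending on the
 * reset reason, re-initializes the CRQ, logs back in, then rebuilds or
 * resets the rx/tx pools before reopening. CHANGE_PARAM resets arrive with
 * the rtnl lock already held by the requester; all other reasons take it
 * here, which is why the locking is conditional on the reason.
 */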
2289
Thomas Falcon2770a792018-05-23 13:38:02 -05002290static int do_hard_reset(struct ibmvnic_adapter *adapter,
2291 struct ibmvnic_rwi *rwi, u32 reset_state)
2292{
2293 struct net_device *netdev = adapter->netdev;
2294 int rc;
2295
Lijun Pancaee7bf2021-04-12 02:41:27 -05002296 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2297 reset_reason_to_string(rwi->reset_reason));
Thomas Falcon2770a792018-05-23 13:38:02 -05002298
Sukadev Bhattiprolu8f1c0fd2021-02-23 21:02:29 -08002299 /* read the state and check (again) after getting rtnl */
2300 reset_state = adapter->state;
2301
2302 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2303 rc = -EBUSY;
2304 goto out;
2305 }
2306
Thomas Falcon2770a792018-05-23 13:38:02 -05002307 netif_carrier_off(netdev);
2308 adapter->reset_reason = rwi->reset_reason;
2309
2310 ibmvnic_cleanup(netdev);
2311 release_resources(adapter);
2312 release_sub_crqs(adapter, 0);
2313 release_crq_queue(adapter);
2314
2315 /* remove the closed state so when we call open it appears
2316 * we are coming from the probed state.
2317 */
2318 adapter->state = VNIC_PROBED;
2319
Thomas Falconbbd669a2019-04-04 18:58:26 -05002320 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002321 rc = init_crq_queue(adapter);
2322 if (rc) {
2323 netdev_err(adapter->netdev,
2324 "Couldn't initialize crq. rc=%d\n", rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002325 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002326 }
2327
Lijun Pan635e4422020-08-19 17:52:26 -05002328 rc = ibmvnic_reset_init(adapter, false);
Thomas Falcon2770a792018-05-23 13:38:02 -05002329 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002330 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002331
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002332 /* If the adapter was in PROBE or DOWN state prior to the reset,
Thomas Falcon2770a792018-05-23 13:38:02 -05002333 * exit here.
2334 */
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002335 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002336 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002337
2338 rc = ibmvnic_login(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002339 if (rc)
2340 goto out;
Juliet Kima5681e22018-11-19 15:59:22 -06002341
2342 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002343 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002344 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002345
2346 ibmvnic_disable_irqs(adapter);
2347 adapter->state = VNIC_CLOSED;
2348
2349 if (reset_state == VNIC_CLOSED)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002350 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002351
2352 rc = __ibmvnic_open(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002353 if (rc) {
2354 rc = IBMVNIC_OPEN_FAILED;
2355 goto out;
2356 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002357
Lijun Pan6be46662020-12-14 15:19:29 -06002358 __netdev_notify_peers(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002359out:
2360 /* restore adapter state if reset failed */
2361 if (rc)
2362 adapter->state = reset_state;
Lijun Pan0666ef72021-04-12 02:41:28 -05002363 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2364 adapter_state_to_string(adapter->state),
2365 adapter->failover_pending, rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002366 return rc;
Thomas Falcon2770a792018-05-23 13:38:02 -05002367}
2368
Nathan Fontenoted651a12017-05-03 14:04:38 -04002369static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2370{
2371 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002372 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002373
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002374 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002375
2376 if (!list_empty(&adapter->rwi_list)) {
2377 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2378 list);
2379 list_del(&rwi->list);
2380 } else {
2381 rwi = NULL;
2382 }
2383
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002384 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002385 return rwi;
2386}
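
/* Illustrative sketch (not part of the driver): reset work items (rwi) are
 * queued by ibmvnic_reset() and drained here under rwi_lock; the worker in
 * __ibmvnic_reset() below loops until the list is empty, in essence:
 *
 *	while ((rwi = get_next_rwi(adapter))) {
 *		rc = do_reset(adapter, rwi, reset_state);
 *		kfree(rwi);
 *	}
 *
 * (the real loop also handles hard resets, passive init, and requeuing of a
 * failed rwi as a hard reset).
 */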
2387
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002388/**
2389 * do_passive_init - complete probing when partner device is detected.
2390 * @adapter: ibmvnic_adapter struct
2391 *
2392 * If the ibmvnic device does not have a partner device to communicate with at
2393 * boot and that partner device comes online at a later time, this function is
2394 * called to complete the initialization process of the ibmvnic device.
2395 * Caller is expected to hold rtnl_lock().
2396 *
2397 * Returns non-zero if sub-CRQs are not initialized properly, leaving the
2398 * device in the down state.
2399 * Returns 0 upon success and the device is in PROBED state.
2400 */
2401
2402static int do_passive_init(struct ibmvnic_adapter *adapter)
2403{
2404 unsigned long timeout = msecs_to_jiffies(30000);
2405 struct net_device *netdev = adapter->netdev;
2406 struct device *dev = &adapter->vdev->dev;
2407 int rc;
2408
2409 netdev_dbg(netdev, "Partner device found, probing.\n");
2410
2411 adapter->state = VNIC_PROBING;
2412 reinit_completion(&adapter->init_done);
2413 adapter->init_done_rc = 0;
2414 adapter->crq.active = true;
2415
2416 rc = send_crq_init_complete(adapter);
2417 if (rc)
2418 goto out;
2419
2420 rc = send_version_xchg(adapter);
2421 if (rc)
2422 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2423
2424 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2425 dev_err(dev, "Initialization sequence timed out\n");
2426 rc = -ETIMEDOUT;
2427 goto out;
2428 }
2429
2430 rc = init_sub_crqs(adapter);
2431 if (rc) {
2432 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2433 goto out;
2434 }
2435
2436 rc = init_sub_crq_irqs(adapter);
2437 if (rc) {
2438		dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
2439 goto init_failed;
2440 }
2441
2442 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2443 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2444 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2445
2446 adapter->state = VNIC_PROBED;
2447 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2448
2449 return 0;
2450
2451init_failed:
2452 release_sub_crqs(adapter, 1);
2453out:
2454 adapter->state = VNIC_DOWN;
2455 return rc;
2456}
2457
Nathan Fontenoted651a12017-05-03 14:04:38 -04002458static void __ibmvnic_reset(struct work_struct *work)
2459{
Nathan Fontenoted651a12017-05-03 14:04:38 -04002460 struct ibmvnic_adapter *adapter;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002461 bool saved_state = false;
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002462 struct ibmvnic_rwi *tmprwi;
2463 struct ibmvnic_rwi *rwi;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002464 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002465 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05002466 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002467
2468 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002469
Juliet Kim7ed5b312019-09-20 16:11:23 -04002470 if (test_and_set_bit_lock(0, &adapter->resetting)) {
Lijun Pan870e04a2021-04-13 14:33:39 -05002471 queue_delayed_work(system_long_wq,
2472 &adapter->ibmvnic_delayed_reset,
2473 IBMVNIC_RESET_DELAY);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002474 return;
2475 }
2476
Nathan Fontenoted651a12017-05-03 14:04:38 -04002477 rwi = get_next_rwi(adapter);
2478 while (rwi) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002479 spin_lock_irqsave(&adapter->state_lock, flags);
2480
Thomas Falcon36f10312019-08-27 11:10:04 -05002481 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002482 adapter->state == VNIC_REMOVED) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002483 spin_unlock_irqrestore(&adapter->state_lock, flags);
Juliet Kim1c2977c2019-09-05 17:30:01 -04002484 kfree(rwi);
2485 rc = EBUSY;
2486 break;
2487 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002488
Juliet Kim7d7195a2020-03-10 09:23:58 -05002489 if (!saved_state) {
2490 reset_state = adapter->state;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002491 saved_state = true;
2492 }
2493 spin_unlock_irqrestore(&adapter->state_lock, flags);
2494
Cristobal Forno53f8b1b2021-06-10 11:08:35 -06002495 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2496 rtnl_lock();
2497 rc = do_passive_init(adapter);
2498 rtnl_unlock();
2499 if (!rc)
2500 netif_carrier_on(adapter->netdev);
2501 } else if (adapter->force_reset_recovery) {
Lijun Panbab08be2021-02-11 00:43:19 -06002502 /* Since we are doing a hard reset now, clear the
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002503 * failover_pending flag so we don't ignore any
2504 * future MOBILITY or other resets.
2505 */
2506 adapter->failover_pending = false;
2507
Juliet Kimb27507b2019-09-20 16:11:22 -04002508 /* Transport event occurred during previous reset */
2509 if (adapter->wait_for_reset) {
2510 /* Previous was CHANGE_PARAM; caller locked */
2511 adapter->force_reset_recovery = false;
2512 rc = do_hard_reset(adapter, rwi, reset_state);
2513 } else {
2514 rtnl_lock();
2515 adapter->force_reset_recovery = false;
2516 rc = do_hard_reset(adapter, rwi, reset_state);
2517 rtnl_unlock();
2518 }
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002519 if (rc) {
2520 /* give backing device time to settle down */
2521 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002522 "[S:%s] Hard reset failed, waiting 60 secs\n",
2523 adapter_state_to_string(adapter->state));
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002524 set_current_state(TASK_UNINTERRUPTIBLE);
2525 schedule_timeout(60 * HZ);
2526 }
Lijun Pan1f45dc22020-12-23 14:49:04 -06002527 } else {
Thomas Falcon2770a792018-05-23 13:38:02 -05002528 rc = do_reset(adapter, rwi, reset_state);
2529 }
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002530 tmprwi = rwi;
Dany Maddena86d5c62020-11-25 18:04:31 -06002531 adapter->last_reset_time = jiffies;
Dany Madden0cb4bc62020-11-25 18:04:27 -06002532
Dany Madden18f141b2020-11-25 18:04:25 -06002533 if (rc)
2534 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002535
2536 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002537
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002538 /*
2539 * If there is another reset queued, free the previous rwi
2539	 * and process the new reset even if the previous reset failed
2540	 * (the previous reset could have failed because of a
2541	 * failover, for instance, so process the failover).
2543 *
2544 * If there are no resets queued and the previous reset failed,
2545 * the adapter would be in an undefined state. So retry the
2546 * previous reset as a hard reset.
2547 */
2548 if (rwi)
2549 kfree(tmprwi);
2550 else if (rc)
2551 rwi = tmprwi;
2552
Juliet Kim7ed5b312019-09-20 16:11:23 -04002553 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
Sukadev Bhattiprolu4f408e12021-06-30 14:36:17 -04002554 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
Juliet Kim7ed5b312019-09-20 16:11:23 -04002555 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002556 }
2557
John Allenc26eba02017-10-26 16:23:25 -05002558 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002559 adapter->reset_done_rc = rc;
2560 complete(&adapter->reset_done);
2561 }
2562
Juliet Kim7ed5b312019-09-20 16:11:23 -04002563 clear_bit_unlock(0, &adapter->resetting);
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002564
2565 netdev_dbg(adapter->netdev,
Lijun Pan0666ef72021-04-12 02:41:28 -05002566 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2567 adapter_state_to_string(adapter->state),
2568 adapter->force_reset_recovery,
Sukadev Bhattiprolu38bd5ce2020-12-04 18:22:35 -08002569 adapter->wait_for_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002570}
2571
2572static void __ibmvnic_delayed_reset(struct work_struct *work)
2573{
2574 struct ibmvnic_adapter *adapter;
2575
2576 adapter = container_of(work, struct ibmvnic_adapter,
2577 ibmvnic_delayed_reset.work);
2578 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002579}
2580
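/* Queue a reset work item for the given reason. Duplicate reasons already
 * on the rwi list are dropped, as are resets requested while the adapter
 * is being removed, is still probing, or has a failover pending. Returns
 * 0 on success or a negative errno.
 */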
Thomas Falconaf894d22018-04-06 18:37:04 -05002581static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2582 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002583{
Thomas Falcon2770a792018-05-23 13:38:02 -05002584 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002585 struct ibmvnic_rwi *rwi, *tmp;
2586 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002587 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002588 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002589
Jakub Kicinskib646acd52021-02-16 22:58:44 -08002590 spin_lock_irqsave(&adapter->rwi_lock, flags);
2591
2592	/* If failover is pending, don't schedule any other reset.
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002593 * Instead let the failover complete. If there is already a
2594	 * failover reset scheduled, we will detect and drop the
2595 * duplicate reset when walking the ->rwi_list below.
2596 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002597 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002598 adapter->state == VNIC_REMOVED ||
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002599 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002600 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002601 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002602 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002603 }
2604
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002605 if (adapter->state == VNIC_PROBING) {
2606 netdev_warn(netdev, "Adapter reset during probe\n");
Lijun Pan91dc5d22021-02-11 00:43:22 -06002607 adapter->init_done_rc = EAGAIN;
2608 ret = EAGAIN;
Thomas Falconaf894d22018-04-06 18:37:04 -05002609 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002610 }
2611
Wang Hai3e98ae02021-06-10 20:54:17 +08002612 list_for_each_entry(tmp, &adapter->rwi_list, list) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04002613 if (tmp->reset_reason == reason) {
Lijun Pancaee7bf2021-04-12 02:41:27 -05002614 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2615 reset_reason_to_string(reason));
Thomas Falconaf894d22018-04-06 18:37:04 -05002616 ret = EBUSY;
2617 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002618 }
2619 }
2620
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002621 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002622 if (!rwi) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002623 ret = ENOMEM;
2624 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002625 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002626	/* If we just received a transport event,
2627	 * flush the reset queue and process this reset
2628 */
2629 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2630 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2631 list_del(entry);
2632 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002633 rwi->reset_reason = reason;
2634 list_add_tail(&rwi->list, &adapter->rwi_list);
Lijun Pancaee7bf2021-04-12 02:41:27 -05002635 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2636 reset_reason_to_string(reason));
Lijun Pan870e04a2021-04-13 14:33:39 -05002637 queue_work(system_long_wq, &adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002638
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08002639 ret = 0;
Thomas Falconaf894d22018-04-06 18:37:04 -05002640err:
Sukadev Bhattiprolu4a41c422021-02-12 20:42:50 -08002641 /* ibmvnic_close() below can block, so drop the lock first */
2642 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2643
2644 if (ret == ENOMEM)
2645 ibmvnic_close(netdev);
2646
Thomas Falconaf894d22018-04-06 18:37:04 -05002647 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002648}
2649
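/* .ndo_tx_timeout handler: schedule a TIMEOUT reset unless a reset is
 * already in progress or the last reset was less than one watchdog
 * interval ago.
 */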
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05002650static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002651{
2652 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002653
Lijun Pan855a6312020-11-20 16:40:13 -06002654 if (test_bit(0, &adapter->resetting)) {
2655 netdev_err(adapter->netdev,
2656 "Adapter is resetting, skip timeout reset\n");
2657 return;
2658 }
Dany Maddena86d5c62020-11-25 18:04:31 -06002659	/* Don't queue a reset until at least 5 seconds (default watchdog val)
2660	 * after the last reset
2661 */
2662 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2663	netdev_dbg(dev, "Not yet time to trigger a tx timeout reset.\n");
2664 return;
2665 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002666 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002667}
2668
2669static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2670 struct ibmvnic_rx_buff *rx_buff)
2671{
2672 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2673
2674 rx_buff->skb = NULL;
2675
2676 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2677 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2678
2679 atomic_dec(&pool->available);
2680}
2681
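/* NAPI poll routine for one rx sub-CRQ: drain completed rx descriptors,
 * copy each frame out of the long term buffer into its skb, restore any
 * stripped VLAN tag and pass the packet up the stack. The rx pool is
 * replenished when it runs low and the queue interrupt is re-enabled once
 * the budget is not exhausted.
 */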
2682static int ibmvnic_poll(struct napi_struct *napi, int budget)
2683{
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002684 struct ibmvnic_sub_crq_queue *rx_scrq;
2685 struct ibmvnic_adapter *adapter;
2686 struct net_device *netdev;
2687 int frames_processed;
2688 int scrq_num;
2689
2690 netdev = napi->dev;
2691 adapter = netdev_priv(netdev);
2692 scrq_num = (int)(napi - adapter->napi);
2693 frames_processed = 0;
2694 rx_scrq = adapter->rx_scrq[scrq_num];
Nathan Fontenot152ce472017-05-26 10:30:54 -04002695
Thomas Falcon032c5e82015-12-21 11:26:06 -06002696restart_poll:
2697 while (frames_processed < budget) {
2698 struct sk_buff *skb;
2699 struct ibmvnic_rx_buff *rx_buff;
2700 union sub_crq *next;
2701 u32 length;
2702 u16 offset;
2703 u8 flags = 0;
2704
Juliet Kim7ed5b312019-09-20 16:11:23 -04002705 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002706 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002707 enable_scrq_irq(adapter, rx_scrq);
Thomas Falcon21ecba62017-06-14 23:50:09 -05002708 napi_complete_done(napi, frames_processed);
2709 return frames_processed;
2710 }
2711
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002712 if (!pending_scrq(adapter, rx_scrq))
Thomas Falcon032c5e82015-12-21 11:26:06 -06002713 break;
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002714 next = ibmvnic_next_scrq(adapter, rx_scrq);
Lijun Pan914789a2021-02-11 00:43:21 -06002715 rx_buff = (struct ibmvnic_rx_buff *)
2716 be64_to_cpu(next->rx_comp.correlator);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002717 /* do error checking */
2718 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002719 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2720 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002721 /* free the entry */
2722 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002723 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002724 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002725 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002726 } else if (!rx_buff->skb) {
2727 /* free the entry */
2728 next->rx_comp.first = 0;
2729 remove_buff_from_pool(adapter, rx_buff);
2730 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002731 }
2732
2733 length = be32_to_cpu(next->rx_comp.len);
2734 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2735 flags = next->rx_comp.flags;
2736 skb = rx_buff->skb;
Lijun Pan42557da2021-02-12 20:48:40 -06002737 /* load long_term_buff before copying to skb */
2738 dma_rmb();
Thomas Falcon032c5e82015-12-21 11:26:06 -06002739 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2740 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002741
2742 /* VLAN Header has been stripped by the system firmware and
2743 * needs to be inserted by the driver
2744 */
2745 if (adapter->rx_vlan_header_insertion &&
2746 (flags & IBMVNIC_VLAN_STRIPPED))
2747 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2748 ntohs(next->rx_comp.vlan_tci));
2749
Thomas Falcon032c5e82015-12-21 11:26:06 -06002750 /* free the entry */
2751 next->rx_comp.first = 0;
2752 remove_buff_from_pool(adapter, rx_buff);
2753
2754 skb_put(skb, length);
2755 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002756 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002757
2758 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2759 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2760 skb->ip_summed = CHECKSUM_UNNECESSARY;
2761 }
2762
2763 length = skb->len;
2764 napi_gro_receive(napi, skb); /* send it up */
2765 netdev->stats.rx_packets++;
2766 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002767 adapter->rx_stats_buffers[scrq_num].packets++;
2768 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002769 frames_processed++;
2770 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002771
Dwip N. Banerjee41ed0a02020-11-18 19:12:25 -06002772 if (adapter->state != VNIC_CLOSING &&
2773 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2774 adapter->req_rx_add_entries_per_subcrq / 2) ||
2775 frames_processed < budget))
Nathan Fontenot152ce472017-05-26 10:30:54 -04002776 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002777 if (frames_processed < budget) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002778 if (napi_complete_done(napi, frames_processed)) {
2779 enable_scrq_irq(adapter, rx_scrq);
2780 if (pending_scrq(adapter, rx_scrq)) {
Dwip N. Banerjeeec20f362020-11-18 19:12:23 -06002781 if (napi_reschedule(napi)) {
2782 disable_scrq_irq(adapter, rx_scrq);
2783 goto restart_poll;
2784 }
2785 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002786 }
2787 }
2788 return frames_processed;
2789}
2790
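/* Issue a CHANGE_PARAM reset and wait for it to finish. The current
 * settings are saved beforehand so that, if the new parameters are
 * rejected, a second reset can restore the previous configuration.
 */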
John Allenc26eba02017-10-26 16:23:25 -05002791static int wait_for_reset(struct ibmvnic_adapter *adapter)
2792{
Thomas Falconaf894d22018-04-06 18:37:04 -05002793 int rc, ret;
2794
John Allenc26eba02017-10-26 16:23:25 -05002795 adapter->fallback.mtu = adapter->req_mtu;
2796 adapter->fallback.rx_queues = adapter->req_rx_queues;
2797 adapter->fallback.tx_queues = adapter->req_tx_queues;
2798 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2799 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2800
Thomas Falcon070eca92019-11-25 17:12:53 -06002801 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002802 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002803 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002804
2805 if (rc) {
2806 ret = rc;
2807 goto out;
2808 }
2809 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2810 if (rc) {
2811 ret = -ENODEV;
2812 goto out;
2813 }
John Allenc26eba02017-10-26 16:23:25 -05002814
Thomas Falconaf894d22018-04-06 18:37:04 -05002815 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002816 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002817 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002818 adapter->desired.mtu = adapter->fallback.mtu;
2819 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2820 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2821 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2822 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2823
Thomas Falcon070eca92019-11-25 17:12:53 -06002824 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002825 adapter->wait_for_reset = true;
2826 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002827 if (rc) {
2828 ret = rc;
2829 goto out;
2830 }
2831 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2832 60000);
2833 if (rc) {
2834 ret = -ENODEV;
2835 goto out;
2836 }
John Allenc26eba02017-10-26 16:23:25 -05002837 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002838out:
John Allenc26eba02017-10-26 16:23:25 -05002839 adapter->wait_for_reset = false;
2840
Thomas Falconaf894d22018-04-06 18:37:04 -05002841 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002842}
2843
John Allen3a807b72017-06-06 16:55:52 -05002844static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2845{
John Allenc26eba02017-10-26 16:23:25 -05002846 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2847
2848 adapter->desired.mtu = new_mtu + ETH_HLEN;
2849
2850 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05002851}
2852
Thomas Falconf10b09e2018-03-12 11:51:05 -05002853static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2854 struct net_device *dev,
2855 netdev_features_t features)
2856{
2857	/* Some backing hardware adapters cannot
2858	 * handle packets with an MSS less than 224
2859 * or with only one segment.
2860 */
2861 if (skb_is_gso(skb)) {
2862 if (skb_shinfo(skb)->gso_size < 224 ||
2863 skb_shinfo(skb)->gso_segs == 1)
2864 features &= ~NETIF_F_GSO_MASK;
2865 }
2866
2867 return features;
2868}
2869
Thomas Falcon032c5e82015-12-21 11:26:06 -06002870static const struct net_device_ops ibmvnic_netdev_ops = {
2871 .ndo_open = ibmvnic_open,
2872 .ndo_stop = ibmvnic_close,
2873 .ndo_start_xmit = ibmvnic_xmit,
2874 .ndo_set_rx_mode = ibmvnic_set_multi,
2875 .ndo_set_mac_address = ibmvnic_set_mac,
2876 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002877 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05002878 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05002879 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002880};
2881
2882/* ethtool functions */
2883
Philippe Reynes8a433792017-01-07 22:37:29 +01002884static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2885 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002886{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002887 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2888 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01002889
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002890 rc = send_query_phys_parms(adapter);
2891 if (rc) {
2892 adapter->speed = SPEED_UNKNOWN;
2893 adapter->duplex = DUPLEX_UNKNOWN;
2894 }
2895 cmd->base.speed = adapter->speed;
2896 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01002897 cmd->base.port = PORT_FIBRE;
2898 cmd->base.phy_address = 0;
2899 cmd->base.autoneg = AUTONEG_ENABLE;
2900
Thomas Falcon032c5e82015-12-21 11:26:06 -06002901 return 0;
2902}
2903
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002904static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002905 struct ethtool_drvinfo *info)
2906{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002907 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2908
Lijun Pan8a96c802021-02-11 00:43:25 -06002909 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2910 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2911 strscpy(info->fw_version, adapter->fw_version,
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002912 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002913}
2914
2915static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2916{
2917 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2918
2919 return adapter->msg_enable;
2920}
2921
2922static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2923{
2924 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2925
2926 adapter->msg_enable = data;
2927}
2928
2929static u32 ibmvnic_get_link(struct net_device *netdev)
2930{
2931 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2932
2933 /* Don't need to send a query because we request a logical link up at
2934 * init and then we wait for link state indications
2935 */
2936 return adapter->logical_link_state;
2937}
2938
2939static void ibmvnic_get_ringparam(struct net_device *netdev,
2940 struct ethtool_ringparam *ring)
2941{
John Allenbc131b32017-08-02 16:46:30 -05002942 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2943
Thomas Falcon723ad912018-09-28 18:38:26 -05002944 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2945 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2946 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2947 } else {
2948 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2949 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2950 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002951 ring->rx_mini_max_pending = 0;
2952 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05002953 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2954 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002955 ring->rx_mini_pending = 0;
2956 ring->rx_jumbo_pending = 0;
2957}
2958
John Allenc26eba02017-10-26 16:23:25 -05002959static int ibmvnic_set_ringparam(struct net_device *netdev,
2960 struct ethtool_ringparam *ring)
2961{
2962 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002963 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002964
Thomas Falcon723ad912018-09-28 18:38:26 -05002965 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002966 adapter->desired.rx_entries = ring->rx_pending;
2967 adapter->desired.tx_entries = ring->tx_pending;
2968
Thomas Falcon723ad912018-09-28 18:38:26 -05002969 ret = wait_for_reset(adapter);
2970
2971 if (!ret &&
2972 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2973 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2974 netdev_info(netdev,
2975 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2976 ring->rx_pending, ring->tx_pending,
2977 adapter->req_rx_add_entries_per_subcrq,
2978 adapter->req_tx_entries_per_subcrq);
2979 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002980}
2981
John Allenc2dbeb62017-08-02 16:47:17 -05002982static void ibmvnic_get_channels(struct net_device *netdev,
2983 struct ethtool_channels *channels)
2984{
2985 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2986
Thomas Falcon723ad912018-09-28 18:38:26 -05002987 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2988 channels->max_rx = adapter->max_rx_queues;
2989 channels->max_tx = adapter->max_tx_queues;
2990 } else {
2991 channels->max_rx = IBMVNIC_MAX_QUEUES;
2992 channels->max_tx = IBMVNIC_MAX_QUEUES;
2993 }
2994
John Allenc2dbeb62017-08-02 16:47:17 -05002995 channels->max_other = 0;
2996 channels->max_combined = 0;
2997 channels->rx_count = adapter->req_rx_queues;
2998 channels->tx_count = adapter->req_tx_queues;
2999 channels->other_count = 0;
3000 channels->combined_count = 0;
3001}
3002
John Allenc26eba02017-10-26 16:23:25 -05003003static int ibmvnic_set_channels(struct net_device *netdev,
3004 struct ethtool_channels *channels)
3005{
3006 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05003007 int ret;
John Allenc26eba02017-10-26 16:23:25 -05003008
Thomas Falcon723ad912018-09-28 18:38:26 -05003009 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05003010 adapter->desired.rx_queues = channels->rx_count;
3011 adapter->desired.tx_queues = channels->tx_count;
3012
Thomas Falcon723ad912018-09-28 18:38:26 -05003013 ret = wait_for_reset(adapter);
3014
3015 if (!ret &&
3016 (adapter->req_rx_queues != channels->rx_count ||
3017 adapter->req_tx_queues != channels->tx_count))
3018 netdev_info(netdev,
3019 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3020 channels->rx_count, channels->tx_count,
3021 adapter->req_rx_queues, adapter->req_tx_queues);
3022 return ret;
John Allenc26eba02017-10-26 16:23:25 -05003023}
3024
Thomas Falcon032c5e82015-12-21 11:26:06 -06003025static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3026{
John Allen3d52b592017-08-02 16:44:14 -05003027 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003028 int i;
3029
Thomas Falcon723ad912018-09-28 18:38:26 -05003030 switch (stringset) {
3031 case ETH_SS_STATS:
3032 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3033 i++, data += ETH_GSTRING_LEN)
3034 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3035
3036 for (i = 0; i < adapter->req_tx_queues; i++) {
3037 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3038 data += ETH_GSTRING_LEN;
3039
3040 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3041 data += ETH_GSTRING_LEN;
3042
3043 snprintf(data, ETH_GSTRING_LEN,
3044 "tx%d_dropped_packets", i);
3045 data += ETH_GSTRING_LEN;
3046 }
3047
3048 for (i = 0; i < adapter->req_rx_queues; i++) {
3049 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3050 data += ETH_GSTRING_LEN;
3051
3052 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3053 data += ETH_GSTRING_LEN;
3054
3055 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3056 data += ETH_GSTRING_LEN;
3057 }
3058 break;
3059
3060 case ETH_SS_PRIV_FLAGS:
3061 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3062 strcpy(data + i * ETH_GSTRING_LEN,
3063 ibmvnic_priv_flags[i]);
3064 break;
3065 default:
Thomas Falcon032c5e82015-12-21 11:26:06 -06003066 return;
John Allen3d52b592017-08-02 16:44:14 -05003067 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003068}
3069
3070static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3071{
John Allen3d52b592017-08-02 16:44:14 -05003072 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3073
Thomas Falcon032c5e82015-12-21 11:26:06 -06003074 switch (sset) {
3075 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05003076 return ARRAY_SIZE(ibmvnic_stats) +
3077 adapter->req_tx_queues * NUM_TX_STATS +
3078 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon723ad912018-09-28 18:38:26 -05003079 case ETH_SS_PRIV_FLAGS:
3080 return ARRAY_SIZE(ibmvnic_priv_flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003081 default:
3082 return -EOPNOTSUPP;
3083 }
3084}
3085
3086static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3087 struct ethtool_stats *stats, u64 *data)
3088{
3089 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3090 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05003091 int i, j;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003092 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003093
3094 memset(&crq, 0, sizeof(crq));
3095 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3096 crq.request_statistics.cmd = REQUEST_STATISTICS;
3097 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3098 crq.request_statistics.len =
3099 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003100
3101 /* Wait for data to be written */
Thomas Falcon070eca92019-11-25 17:12:53 -06003102 reinit_completion(&adapter->stats_done);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003103 rc = ibmvnic_send_crq(adapter, &crq);
3104 if (rc)
3105 return;
Thomas Falcon476d96c2019-11-25 17:12:55 -06003106 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3107 if (rc)
3108 return;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003109
3110 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
Lijun Pan91dc5d22021-02-11 00:43:22 -06003111 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3112 (adapter, ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05003113
3114 for (j = 0; j < adapter->req_tx_queues; j++) {
3115 data[i] = adapter->tx_stats_buffers[j].packets;
3116 i++;
3117 data[i] = adapter->tx_stats_buffers[j].bytes;
3118 i++;
3119 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3120 i++;
3121 }
3122
3123 for (j = 0; j < adapter->req_rx_queues; j++) {
3124 data[i] = adapter->rx_stats_buffers[j].packets;
3125 i++;
3126 data[i] = adapter->rx_stats_buffers[j].bytes;
3127 i++;
3128 data[i] = adapter->rx_stats_buffers[j].interrupts;
3129 i++;
3130 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003131}
3132
Thomas Falcon723ad912018-09-28 18:38:26 -05003133static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3134{
3135 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3136
3137 return adapter->priv_flags;
3138}
3139
3140static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3141{
3142 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3143 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3144
3145 if (which_maxes)
3146 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3147 else
3148 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3149
3150 return 0;
3151}
Lijun Pan91dc5d22021-02-11 00:43:22 -06003152
Thomas Falcon032c5e82015-12-21 11:26:06 -06003153static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003154 .get_drvinfo = ibmvnic_get_drvinfo,
3155 .get_msglevel = ibmvnic_get_msglevel,
3156 .set_msglevel = ibmvnic_set_msglevel,
3157 .get_link = ibmvnic_get_link,
3158 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05003159 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05003160 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05003161 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003162 .get_strings = ibmvnic_get_strings,
3163 .get_sset_count = ibmvnic_get_sset_count,
3164 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01003165 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon723ad912018-09-28 18:38:26 -05003166 .get_priv_flags = ibmvnic_get_priv_flags,
3167 .set_priv_flags = ibmvnic_set_priv_flags,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003168};
3169
3170/* Routines for managing CRQs/sCRQs */
3171
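/* Clean one sub-CRQ and re-register it with the hypervisor after a reset,
 * releasing any IRQ mapping the queue still holds.
 */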
Nathan Fontenot57a49432017-05-26 10:31:12 -04003172static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3173 struct ibmvnic_sub_crq_queue *scrq)
3174{
3175 int rc;
3176
Dany Madden9281cf22020-11-25 18:04:26 -06003177 if (!scrq) {
YANG LI862aecb2020-12-30 15:23:14 +08003178 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
Dany Madden9281cf22020-11-25 18:04:26 -06003179 return -EINVAL;
3180 }
3181
Nathan Fontenot57a49432017-05-26 10:31:12 -04003182 if (scrq->irq) {
3183 free_irq(scrq->irq, scrq);
3184 irq_dispose_mapping(scrq->irq);
3185 scrq->irq = 0;
3186 }
3187
Dany Madden9281cf22020-11-25 18:04:26 -06003188 if (scrq->msgs) {
3189 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3190 atomic_set(&scrq->used, 0);
3191 scrq->cur = 0;
Jakub Kicinski55fd59b2020-12-03 15:42:13 -08003192 scrq->ind_buf.index = 0;
Dany Madden9281cf22020-11-25 18:04:26 -06003193 } else {
3194 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3195 return -EINVAL;
3196 }
Nathan Fontenot57a49432017-05-26 10:31:12 -04003197
3198 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3199 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3200 return rc;
3201}
3202
3203static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3204{
3205 int i, rc;
3206
Lijun Pana0faaa22020-11-23 13:35:45 -06003207 if (!adapter->tx_scrq || !adapter->rx_scrq)
3208 return -EINVAL;
3209
Nathan Fontenot57a49432017-05-26 10:31:12 -04003210 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003211 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04003212 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3213 if (rc)
3214 return rc;
3215 }
3216
3217 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003218 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04003219 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3220 if (rc)
3221 return rc;
3222 }
3223
Nathan Fontenot57a49432017-05-26 10:31:12 -04003224 return rc;
3225}
3226
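/* Tear down one sub-CRQ: optionally deregister it from the hypervisor with
 * H_FREE_SUB_CRQ, then free its indirect buffer and DMA-unmap and free the
 * message pages.
 */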
Thomas Falcon032c5e82015-12-21 11:26:06 -06003227static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003228 struct ibmvnic_sub_crq_queue *scrq,
3229 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003230{
3231 struct device *dev = &adapter->vdev->dev;
3232 long rc;
3233
3234 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3235
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003236 if (do_h_free) {
3237 /* Close the sub-crqs */
3238 do {
3239 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3240 adapter->vdev->unit_address,
3241 scrq->crq_num);
3242 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003243
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003244 if (rc) {
3245 netdev_err(adapter->netdev,
3246 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3247 scrq->crq_num, rc);
3248 }
Thomas Falconffa73852017-04-19 13:44:29 -04003249 }
3250
Thomas Falconf019fb62020-11-18 19:12:17 -06003251 dma_free_coherent(dev,
3252 IBMVNIC_IND_ARR_SZ,
3253 scrq->ind_buf.indir_arr,
3254 scrq->ind_buf.indir_dma);
3255
Thomas Falcon032c5e82015-12-21 11:26:06 -06003256 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3257 DMA_BIDIRECTIONAL);
3258 free_pages((unsigned long)scrq->msgs, 2);
3259 kfree(scrq);
3260}
3261
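/* Allocate and register one sub-CRQ: four pages of message buffers are
 * DMA mapped and registered via h_reg_sub_crq, and an indirect descriptor
 * array is allocated for batched submissions.
 */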
3262static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3263 *adapter)
3264{
3265 struct device *dev = &adapter->vdev->dev;
3266 struct ibmvnic_sub_crq_queue *scrq;
3267 int rc;
3268
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003269 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003270 if (!scrq)
3271 return NULL;
3272
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04003273 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003274 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003275 if (!scrq->msgs) {
3276 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3277 goto zero_page_failed;
3278 }
3279
3280 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3281 DMA_BIDIRECTIONAL);
3282 if (dma_mapping_error(dev, scrq->msg_token)) {
3283 dev_warn(dev, "Couldn't map crq queue messages page\n");
3284 goto map_failed;
3285 }
3286
3287 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3288 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3289
3290 if (rc == H_RESOURCE)
3291 rc = ibmvnic_reset_crq(adapter);
3292
3293 if (rc == H_CLOSED) {
3294 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3295 } else if (rc) {
3296 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3297 goto reg_failed;
3298 }
3299
Thomas Falcon032c5e82015-12-21 11:26:06 -06003300 scrq->adapter = adapter;
3301 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falconf019fb62020-11-18 19:12:17 -06003302 scrq->ind_buf.index = 0;
3303
3304 scrq->ind_buf.indir_arr =
3305 dma_alloc_coherent(dev,
3306 IBMVNIC_IND_ARR_SZ,
3307 &scrq->ind_buf.indir_dma,
3308 GFP_KERNEL);
3309
3310 if (!scrq->ind_buf.indir_arr)
3311 goto indir_failed;
3312
Thomas Falcon032c5e82015-12-21 11:26:06 -06003313 spin_lock_init(&scrq->lock);
3314
3315 netdev_dbg(adapter->netdev,
3316 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3317 scrq->crq_num, scrq->hw_irq, scrq->irq);
3318
3319 return scrq;
3320
Thomas Falconf019fb62020-11-18 19:12:17 -06003321indir_failed:
3322 do {
3323 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3324 adapter->vdev->unit_address,
3325 scrq->crq_num);
3326	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003327reg_failed:
3328 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3329 DMA_BIDIRECTIONAL);
3330map_failed:
3331 free_pages((unsigned long)scrq->msgs, 2);
3332zero_page_failed:
3333 kfree(scrq);
3334
3335 return NULL;
3336}
3337
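/* Release every tx and rx sub-CRQ, freeing their IRQs first. When
 * do_h_free is set the queues are also deregistered from the hypervisor.
 */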
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003338static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003339{
3340 int i;
3341
3342 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003343 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003344 if (!adapter->tx_scrq[i])
3345 continue;
3346
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003347 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3348 i);
Sukadev Bhattiprolu65d64702021-06-23 21:13:12 -07003349 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003350 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003351 free_irq(adapter->tx_scrq[i]->irq,
3352 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003353 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003354 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003355 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003356
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003357 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3358 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003359 }
3360
Nathan Fontenot9501df32017-03-15 23:38:07 -04003361 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003362 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003363 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003364 }
3365
3366 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003367 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003368 if (!adapter->rx_scrq[i])
3369 continue;
3370
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003371 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3372 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003373 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003374 free_irq(adapter->rx_scrq[i]->irq,
3375 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003376 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003377 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003378 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003379
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003380 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3381 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003382 }
3383
Nathan Fontenot9501df32017-03-15 23:38:07 -04003384 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003385 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003386 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003387 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003388}
3389
3390static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3391 struct ibmvnic_sub_crq_queue *scrq)
3392{
3393 struct device *dev = &adapter->vdev->dev;
3394 unsigned long rc;
3395
3396 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3397 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3398 if (rc)
3399 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3400 scrq->hw_irq, rc);
3401 return rc;
3402}
3403
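/* Re-enable the interrupt for a sub-CRQ via H_VIOCTL. During a mobility
 * reset an H_EOI is issued for the hardware interrupt first; under XIVE
 * this fails with H_FUNCTION, which is expected and ignored.
 */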
3404static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3405 struct ibmvnic_sub_crq_queue *scrq)
3406{
3407 struct device *dev = &adapter->vdev->dev;
3408 unsigned long rc;
3409
3410 if (scrq->hw_irq > 0x100000000ULL) {
3411 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3412 return 1;
3413 }
3414
Juliet Kim7ed5b312019-09-20 16:11:23 -04003415 if (test_bit(0, &adapter->resetting) &&
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003416 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Juliet Kim284f87d2019-11-20 10:50:03 -05003417 u64 val = (0xff000000) | scrq->hw_irq;
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003418
Juliet Kim284f87d2019-11-20 10:50:03 -05003419 rc = plpar_hcall_norets(H_EOI, val);
Juliet Kim2df5c602019-11-20 10:50:04 -05003420 /* H_EOI would fail with rc = H_FUNCTION when running
3421 * in XIVE mode which is expected, but not an error.
3422 */
Sukadev Bhattiprolu154b3b22021-06-23 21:13:16 -07003423 if (rc && (rc != H_FUNCTION))
Juliet Kim284f87d2019-11-20 10:50:03 -05003424 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3425 val, rc);
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003426 }
Thomas Falconf23e0642018-04-15 18:53:36 -05003427
Thomas Falcon032c5e82015-12-21 11:26:06 -06003428 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3429 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3430 if (rc)
3431 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3432 scrq->hw_irq, rc);
3433 return rc;
3434}
3435
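/* Handle tx completions for one sub-CRQ: free or consume the completed
 * skbs, return their buffers to the tx pool, update BQL accounting and
 * wake the subqueue once enough entries have drained.
 */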
3436static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3437 struct ibmvnic_sub_crq_queue *scrq)
3438{
3439 struct device *dev = &adapter->vdev->dev;
Thomas Falcon06b3e352018-03-16 20:00:28 -05003440 struct ibmvnic_tx_pool *tx_pool;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003441 struct ibmvnic_tx_buff *txbuff;
Thomas Falcon0d973382020-11-18 19:12:19 -06003442 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003443 union sub_crq *next;
3444 int index;
Thomas Falconc62aa372020-11-18 19:12:20 -06003445 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003446
3447restart_loop:
3448 while (pending_scrq(adapter, scrq)) {
3449 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06003450 int num_entries = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06003451 int total_bytes = 0;
3452 int num_packets = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003453
3454 next = ibmvnic_next_scrq(adapter, scrq);
3455 for (i = 0; i < next->tx_comp.num_comps; i++) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003456 index = be32_to_cpu(next->tx_comp.correlators[i]);
Thomas Falcon06b3e352018-03-16 20:00:28 -05003457 if (index & IBMVNIC_TSO_POOL_MASK) {
3458 tx_pool = &adapter->tso_pool[pool];
3459 index &= ~IBMVNIC_TSO_POOL_MASK;
3460 } else {
3461 tx_pool = &adapter->tx_pool[pool];
3462 }
3463
3464 txbuff = &tx_pool->tx_buff[index];
Thomas Falcon0d973382020-11-18 19:12:19 -06003465 num_packets++;
Thomas Falconffc385b2018-02-18 10:08:41 -06003466 num_entries += txbuff->num_entries;
Thomas Falcon0d973382020-11-18 19:12:19 -06003467 if (txbuff->skb) {
3468 total_bytes += txbuff->skb->len;
Lijun Panca09bf72021-04-13 03:33:25 -05003469 if (next->tx_comp.rcs[i]) {
3470 dev_err(dev, "tx error %x\n",
3471 next->tx_comp.rcs[i]);
3472 dev_kfree_skb_irq(txbuff->skb);
3473 } else {
3474 dev_consume_skb_irq(txbuff->skb);
3475 }
Thomas Falcon0d973382020-11-18 19:12:19 -06003476 txbuff->skb = NULL;
3477 } else {
3478 netdev_warn(adapter->netdev,
3479 "TX completion received with NULL socket buffer\n");
3480 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05003481 tx_pool->free_map[tx_pool->producer_index] = index;
3482 tx_pool->producer_index =
3483 (tx_pool->producer_index + 1) %
3484 tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003485 }
3486	/* remove tx_comp scrq */
3487 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003488
Thomas Falcon0d973382020-11-18 19:12:19 -06003489 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3490 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3491
Thomas Falconffc385b2018-02-18 10:08:41 -06003492 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003493 (adapter->req_tx_entries_per_subcrq / 2) &&
3494 __netif_subqueue_stopped(adapter->netdev,
3495 scrq->pool_index)) {
3496 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06003497 netdev_dbg(adapter->netdev, "Started queue %d\n",
3498 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003499 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003500 }
3501
3502 enable_scrq_irq(adapter, scrq);
3503
3504 if (pending_scrq(adapter, scrq)) {
3505 disable_scrq_irq(adapter, scrq);
3506 goto restart_loop;
3507 }
3508
3509 return 0;
3510}
3511
3512static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3513{
3514 struct ibmvnic_sub_crq_queue *scrq = instance;
3515 struct ibmvnic_adapter *adapter = scrq->adapter;
3516
3517 disable_scrq_irq(adapter, scrq);
3518 ibmvnic_complete_tx(adapter, scrq);
3519
3520 return IRQ_HANDLED;
3521}
3522
3523static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3524{
3525 struct ibmvnic_sub_crq_queue *scrq = instance;
3526 struct ibmvnic_adapter *adapter = scrq->adapter;
3527
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06003528 /* When booting a kdump kernel we can hit pending interrupts
3529 * prior to completing driver initialization.
3530 */
3531 if (unlikely(adapter->state != VNIC_OPEN))
3532 return IRQ_NONE;
3533
John Allen3d52b592017-08-02 16:44:14 -05003534 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3535
Thomas Falcon032c5e82015-12-21 11:26:06 -06003536 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3537 disable_scrq_irq(adapter, scrq);
3538 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3539 }
3540
3541 return IRQ_HANDLED;
3542}
3543
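/* Map and request an IRQ for every tx and rx sub-CRQ. On failure, all IRQs
 * acquired so far are released along with the sub-CRQs themselves.
 */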
Thomas Falconea22d512016-07-06 15:35:17 -05003544static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3545{
3546 struct device *dev = &adapter->vdev->dev;
3547 struct ibmvnic_sub_crq_queue *scrq;
3548 int i = 0, j = 0;
3549 int rc = 0;
3550
3551 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003552 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3553 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003554 scrq = adapter->tx_scrq[i];
3555 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3556
Michael Ellerman99c17902016-09-10 19:59:05 +10003557 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003558 rc = -EINVAL;
3559 dev_err(dev, "Error mapping irq\n");
3560 goto req_tx_irq_failed;
3561 }
3562
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003563 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3564 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003565 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003566 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003567
3568 if (rc) {
3569 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3570 scrq->irq, rc);
3571 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06003572 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05003573 }
3574 }
3575
3576 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003577 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3578 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003579 scrq = adapter->rx_scrq[i];
3580 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10003581 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003582 rc = -EINVAL;
3583 dev_err(dev, "Error mapping irq\n");
3584 goto req_rx_irq_failed;
3585 }
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003586 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3587 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003588 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003589 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003590 if (rc) {
3591 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3592 scrq->irq, rc);
3593 irq_dispose_mapping(scrq->irq);
3594 goto req_rx_irq_failed;
3595 }
3596 }
3597 return rc;
3598
3599req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003600 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003601 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3602 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003603 }
Thomas Falconea22d512016-07-06 15:35:17 -05003604 i = adapter->req_tx_queues;
3605req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003606 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003607 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
Thomas Falcon27a21452020-07-29 16:36:32 -05003608 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003609 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003610 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05003611 return rc;
3612}
3613
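/* Allocate the requested tx + rx sub-CRQs. If fewer queues than requested
 * could be registered (but at least the minimum), the request is scaled
 * back and the registered queues are divided between tx and rx.
 */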
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003614static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003615{
3616 struct device *dev = &adapter->vdev->dev;
3617 struct ibmvnic_sub_crq_queue **allqueues;
3618 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003619 int total_queues;
3620 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05003621 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003622
Thomas Falcon032c5e82015-12-21 11:26:06 -06003623 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3624
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003625 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003626 if (!allqueues)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003627 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003628
3629 for (i = 0; i < total_queues; i++) {
3630 allqueues[i] = init_sub_crq_queue(adapter);
3631 if (!allqueues[i]) {
3632 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3633 break;
3634 }
3635 registered_queues++;
3636 }
3637
3638 /* Make sure we were able to register the minimum number of queues */
3639 if (registered_queues <
3640 adapter->min_tx_queues + adapter->min_rx_queues) {
3641 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3642 goto tx_failed;
3643 }
3644
3645	/* Distribute the shortfall from queues that failed to allocate */
3646 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3647 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3648 switch (i % 3) {
3649 case 0:
3650 if (adapter->req_rx_queues > adapter->min_rx_queues)
3651 adapter->req_rx_queues--;
3652 else
3653 more++;
3654 break;
3655 case 1:
3656 if (adapter->req_tx_queues > adapter->min_tx_queues)
3657 adapter->req_tx_queues--;
3658 else
3659 more++;
3660 break;
3661 }
3662 }
3663
3664 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003665 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003666 if (!adapter->tx_scrq)
3667 goto tx_failed;
3668
3669 for (i = 0; i < adapter->req_tx_queues; i++) {
3670 adapter->tx_scrq[i] = allqueues[i];
3671 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003672 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003673 }
3674
3675 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003676 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003677 if (!adapter->rx_scrq)
3678 goto rx_failed;
3679
3680 for (i = 0; i < adapter->req_rx_queues; i++) {
3681 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3682 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003683 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003684 }
3685
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003686 kfree(allqueues);
3687 return 0;
3688
3689rx_failed:
3690 kfree(adapter->tx_scrq);
3691 adapter->tx_scrq = NULL;
3692tx_failed:
3693 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003694 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003695 kfree(allqueues);
3696 return -1;
3697}
3698
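/* Work out the capabilities to request (queue counts, ring sizes, MTU),
 * capping ring sizes so a single long term buffer stays within
 * IBMVNIC_MAX_LTB_SIZE, then send one REQUEST_CAPABILITY CRQ per value.
 */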
Lijun Pan09081b92020-09-27 20:13:27 -05003699static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003700{
3701 struct device *dev = &adapter->vdev->dev;
3702 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05003703 int max_entries;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003704
3705 if (!retry) {
3706 /* Sub-CRQ entries are 32 byte long */
3707 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3708
3709 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3710 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3711 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3712 return;
3713 }
3714
John Allenc26eba02017-10-26 16:23:25 -05003715 if (adapter->desired.mtu)
3716 adapter->req_mtu = adapter->desired.mtu;
3717 else
3718 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003719
John Allenc26eba02017-10-26 16:23:25 -05003720 if (!adapter->desired.tx_entries)
3721 adapter->desired.tx_entries =
3722 adapter->max_tx_entries_per_subcrq;
3723 if (!adapter->desired.rx_entries)
3724 adapter->desired.rx_entries =
3725 adapter->max_rx_add_entries_per_subcrq;
3726
3727 max_entries = IBMVNIC_MAX_LTB_SIZE /
3728 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3729
3730 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3731 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3732 adapter->desired.tx_entries = max_entries;
3733 }
3734
3735 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3736 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3737 adapter->desired.rx_entries = max_entries;
3738 }
3739
3740 if (adapter->desired.tx_entries)
3741 adapter->req_tx_entries_per_subcrq =
3742 adapter->desired.tx_entries;
3743 else
3744 adapter->req_tx_entries_per_subcrq =
3745 adapter->max_tx_entries_per_subcrq;
3746
3747 if (adapter->desired.rx_entries)
3748 adapter->req_rx_add_entries_per_subcrq =
3749 adapter->desired.rx_entries;
3750 else
3751 adapter->req_rx_add_entries_per_subcrq =
3752 adapter->max_rx_add_entries_per_subcrq;
3753
3754 if (adapter->desired.tx_queues)
3755 adapter->req_tx_queues =
3756 adapter->desired.tx_queues;
3757 else
3758 adapter->req_tx_queues =
3759 adapter->opt_tx_comp_sub_queues;
3760
3761 if (adapter->desired.rx_queues)
3762 adapter->req_rx_queues =
3763 adapter->desired.rx_queues;
3764 else
3765 adapter->req_rx_queues =
3766 adapter->opt_rx_comp_queues;
3767
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003768 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003769 }
3770
Thomas Falcon032c5e82015-12-21 11:26:06 -06003771 memset(&crq, 0, sizeof(crq));
3772 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3773 crq.request_capability.cmd = REQUEST_CAPABILITY;
3774
3775 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003776 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003777 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003778 ibmvnic_send_crq(adapter, &crq);
3779
3780 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003781 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003782 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003783 ibmvnic_send_crq(adapter, &crq);
3784
3785 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003786 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003787 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003788 ibmvnic_send_crq(adapter, &crq);
3789
3790 crq.request_capability.capability =
3791 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3792 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003793 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003794 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003795 ibmvnic_send_crq(adapter, &crq);
3796
3797 crq.request_capability.capability =
3798 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3799 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003800 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003801 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003802 ibmvnic_send_crq(adapter, &crq);
3803
3804 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06003805 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06003806 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003807 ibmvnic_send_crq(adapter, &crq);
3808
3809 if (adapter->netdev->flags & IFF_PROMISC) {
3810 if (adapter->promisc_supported) {
3811 crq.request_capability.capability =
3812 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003813 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06003814 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003815 ibmvnic_send_crq(adapter, &crq);
3816 }
3817 } else {
3818 crq.request_capability.capability =
3819 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003820 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06003821 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003822 ibmvnic_send_crq(adapter, &crq);
3823 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003824}
3825
3826static int pending_scrq(struct ibmvnic_adapter *adapter,
3827 struct ibmvnic_sub_crq_queue *scrq)
3828{
3829 union sub_crq *entry = &scrq->msgs[scrq->cur];
Lijun Pan665ab1e2021-01-29 19:19:04 -06003830 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003831
Lijun Pan665ab1e2021-01-29 19:19:04 -06003832 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3833
3834 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3835 * contents of the SCRQ descriptor
3836 */
3837 dma_rmb();
3838
3839 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003840}
3841
3842static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3843 struct ibmvnic_sub_crq_queue *scrq)
3844{
3845 union sub_crq *entry;
3846 unsigned long flags;
3847
3848 spin_lock_irqsave(&scrq->lock, flags);
3849 entry = &scrq->msgs[scrq->cur];
3850 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3851 if (++scrq->cur == scrq->size)
3852 scrq->cur = 0;
3853 } else {
3854 entry = NULL;
3855 }
3856 spin_unlock_irqrestore(&scrq->lock, flags);
3857
Lijun Pan665ab1e2021-01-29 19:19:04 -06003858 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3859 * contents of the SCRQ descriptor
Thomas Falconb71ec952020-12-01 09:52:10 -06003860 */
3861 dma_rmb();
3862
Thomas Falcon032c5e82015-12-21 11:26:06 -06003863 return entry;
3864}
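/* Editor's note: the following is an illustrative sketch, not part of the
 * ibmvnic driver. It restates the consumer-side ordering rule that
 * pending_scrq() and ibmvnic_next_scrq() above depend on: load the
 * descriptor's valid flag first, then issue dma_rmb() before reading the
 * rest of the descriptor that the device DMA-writes. All names below
 * (example_desc, example_ring, EXAMPLE_DESC_VALID) are made up for the
 * example.
 */
#if 0	/* illustrative only, excluded from the build */
#define EXAMPLE_DESC_VALID	0x80

struct example_desc {
	u8 flags;
	u8 payload[31];
};

struct example_ring {
	struct example_desc *descs;
	unsigned int cur;
	unsigned int size;
};

static bool example_entry_ready(struct example_ring *ring)
{
	struct example_desc *desc = &ring->descs[ring->cur];

	if (!(desc->flags & EXAMPLE_DESC_VALID))
		return false;

	/* The flag was seen valid; make sure loads of the payload are not
	 * reordered before the flag load.
	 */
	dma_rmb();

	return true;
}
#endif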
3865
3866static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3867{
3868 struct ibmvnic_crq_queue *queue = &adapter->crq;
3869 union ibmvnic_crq *crq;
3870
3871 crq = &queue->msgs[queue->cur];
3872 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3873 if (++queue->cur == queue->size)
3874 queue->cur = 0;
3875 } else {
3876 crq = NULL;
3877 }
3878
3879 return crq;
3880}
3881
Thomas Falcon2d14d372018-07-13 12:03:32 -05003882static void print_subcrq_error(struct device *dev, int rc, const char *func)
3883{
3884 switch (rc) {
3885 case H_PARAMETER:
3886 dev_warn_ratelimited(dev,
3887 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3888 func, rc);
3889 break;
3890 case H_CLOSED:
3891 dev_warn_ratelimited(dev,
3892 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3893 func, rc);
3894 break;
3895 default:
3896 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3897 break;
3898 }
3899}
3900
Thomas Falconad7775d2016-04-01 17:20:34 -05003901static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3902 u64 remote_handle, u64 ioba, u64 num_entries)
3903{
3904 unsigned int ua = adapter->vdev->unit_address;
3905 struct device *dev = &adapter->vdev->dev;
3906 int rc;
3907
3908 /* Make sure the hypervisor sees the complete request */
Lijun Pan1a421562021-02-12 20:36:46 -06003909 dma_wmb();
Thomas Falconad7775d2016-04-01 17:20:34 -05003910 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3911 cpu_to_be64(remote_handle),
3912 ioba, num_entries);
3913
Thomas Falcon2d14d372018-07-13 12:03:32 -05003914 if (rc)
3915 print_subcrq_error(dev, rc, __func__);
Thomas Falconad7775d2016-04-01 17:20:34 -05003916
3917 return rc;
3918}
3919
Thomas Falcon032c5e82015-12-21 11:26:06 -06003920static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3921 union ibmvnic_crq *crq)
3922{
3923 unsigned int ua = adapter->vdev->unit_address;
3924 struct device *dev = &adapter->vdev->dev;
3925 u64 *u64_crq = (u64 *)crq;
3926 int rc;
3927
3928 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06003929 (unsigned long)cpu_to_be64(u64_crq[0]),
3930 (unsigned long)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003931
Thomas Falcon51536982018-05-23 13:37:56 -05003932 if (!adapter->crq.active &&
3933 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3934 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3935 return -EINVAL;
3936 }
3937
Thomas Falcon032c5e82015-12-21 11:26:06 -06003938 /* Make sure the hypervisor sees the complete request */
Lijun Pan1a421562021-02-12 20:36:46 -06003939 dma_wmb();
Thomas Falcon032c5e82015-12-21 11:26:06 -06003940
3941 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3942 cpu_to_be64(u64_crq[0]),
3943 cpu_to_be64(u64_crq[1]));
3944
3945 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003946 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003947 dev_warn(dev, "CRQ Queue closed\n");
Lijun Panfa68bfa2020-08-19 17:52:24 -05003948 /* do not reset, report the fail, wait for passive init from server */
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003949 }
3950
Thomas Falcon032c5e82015-12-21 11:26:06 -06003951 dev_warn(dev, "Send error (rc=%d)\n", rc);
3952 }
3953
3954 return rc;
3955}
3956
3957static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3958{
Thomas Falcon36a782f2020-08-31 11:59:57 -05003959 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003960 union ibmvnic_crq crq;
Thomas Falcon36a782f2020-08-31 11:59:57 -05003961 int retries = 100;
3962 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003963
3964 memset(&crq, 0, sizeof(crq));
3965 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3966 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3967 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3968
Thomas Falcon36a782f2020-08-31 11:59:57 -05003969 do {
3970 rc = ibmvnic_send_crq(adapter, &crq);
3971 if (rc != H_CLOSED)
3972 break;
3973 retries--;
3974 msleep(50);
3975
3976 } while (retries > 0);
3977
3978 if (rc) {
3979 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3980 return rc;
3981 }
3982
3983 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003984}
3985
Nathan Fontenot37798d02017-11-08 11:23:56 -06003986struct vnic_login_client_data {
3987 u8 type;
3988 __be16 len;
Kees Cook08ea5562018-04-10 15:26:43 -07003989 char name[];
Nathan Fontenot37798d02017-11-08 11:23:56 -06003990} __packed;
3991
3992static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3993{
3994 int len;
3995
3996 /* Calculate the amount of buffer space needed for the
3997 * vnic client data in the login buffer. There are four entries,
3998 * OS name, LPAR name, device name, and a null last entry.
3999 */
4000 len = 4 * sizeof(struct vnic_login_client_data);
4001 len += 6; /* "Linux" plus NULL */
4002 len += strlen(utsname()->nodename) + 1;
4003 len += strlen(adapter->netdev->name) + 1;
4004
4005 return len;
4006}
4007
4008static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4009 struct vnic_login_client_data *vlcd)
4010{
4011 const char *os_name = "Linux";
4012 int len;
4013
4014 /* Type 1 - LPAR OS */
4015 vlcd->type = 1;
4016 len = strlen(os_name) + 1;
4017 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004018 strscpy(vlcd->name, os_name, len);
Kees Cook08ea5562018-04-10 15:26:43 -07004019 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004020
4021 /* Type 2 - LPAR name */
4022 vlcd->type = 2;
4023 len = strlen(utsname()->nodename) + 1;
4024 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004025 strscpy(vlcd->name, utsname()->nodename, len);
Kees Cook08ea5562018-04-10 15:26:43 -07004026 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004027
4028 /* Type 3 - device name */
4029 vlcd->type = 3;
4030 len = strlen(adapter->netdev->name) + 1;
4031 vlcd->len = cpu_to_be16(len);
Kees Cookef2c3dd2021-06-21 14:35:09 -07004032 strscpy(vlcd->name, adapter->netdev->name, len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06004033}
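/* Editor's note: worked example (hypothetical values) of the client data
 * layout produced by the two helpers above. With nodename "lpar1" and
 * netdev name "eth0", vnic_client_data_len() returns
 *
 *	4 * sizeof(struct vnic_login_client_data)	header space
 *	+ 6	("Linux" + NUL)
 *	+ 6	("lpar1" + NUL)
 *	+ 5	("eth0"  + NUL)
 *
 * and vnic_add_client_data() fills the buffer as three consecutive
 * {type, len, name} records (len includes the NUL, since len = strlen() + 1):
 *
 *	[type=1 len=6 "Linux\0"][type=2 len=6 "lpar1\0"][type=3 len=5 "eth0\0"]
 *
 * followed by the zeroed fourth entry that terminates the list; the buffer
 * is kzalloc'd in send_login(), so that last entry stays all zeroes.
 */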
4034
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004035static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004036{
4037 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4038 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004039 struct device *dev = &adapter->vdev->dev;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004040 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004041 dma_addr_t rsp_buffer_token;
4042 dma_addr_t buffer_token;
4043 size_t rsp_buffer_size;
4044 union ibmvnic_crq crq;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004045 int client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004046 size_t buffer_size;
4047 __be64 *tx_list_p;
4048 __be64 *rx_list_p;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004049 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004050 int i;
4051
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004052 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4053 netdev_err(adapter->netdev,
4054 "RX or TX queues are not allocated, device login failed\n");
4055 return -1;
4056 }
4057
Lijun Pana0c8be52020-12-19 15:39:19 -06004058 release_login_buffer(adapter);
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06004059 release_login_rsp_buffer(adapter);
Lijun Pana0c8be52020-12-19 15:39:19 -06004060
Nathan Fontenot37798d02017-11-08 11:23:56 -06004061 client_data_len = vnic_client_data_len(adapter);
4062
Thomas Falcon032c5e82015-12-21 11:26:06 -06004063 buffer_size =
4064 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06004065 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4066 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004067
Nathan Fontenot37798d02017-11-08 11:23:56 -06004068 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004069 if (!login_buffer)
4070 goto buf_alloc_failed;
4071
4072 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4073 DMA_TO_DEVICE);
4074 if (dma_mapping_error(dev, buffer_token)) {
4075 dev_err(dev, "Couldn't map login buffer\n");
4076 goto buf_map_failed;
4077 }
4078
John Allen498cd8e2016-04-06 11:49:55 -05004079 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4080 sizeof(u64) * adapter->req_tx_queues +
4081 sizeof(u64) * adapter->req_rx_queues +
4082 sizeof(u64) * adapter->req_rx_queues +
4083 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004084
4085 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4086 if (!login_rsp_buffer)
4087 goto buf_rsp_alloc_failed;
4088
4089 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4090 rsp_buffer_size, DMA_FROM_DEVICE);
4091 if (dma_mapping_error(dev, rsp_buffer_token)) {
4092 dev_err(dev, "Couldn't map login rsp buffer\n");
4093 goto buf_rsp_map_failed;
4094 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04004095
Thomas Falcon032c5e82015-12-21 11:26:06 -06004096 adapter->login_buf = login_buffer;
4097 adapter->login_buf_token = buffer_token;
4098 adapter->login_buf_sz = buffer_size;
4099 adapter->login_rsp_buf = login_rsp_buffer;
4100 adapter->login_rsp_buf_token = rsp_buffer_token;
4101 adapter->login_rsp_buf_sz = rsp_buffer_size;
4102
4103 login_buffer->len = cpu_to_be32(buffer_size);
4104 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4105 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4106 login_buffer->off_txcomp_subcrqs =
4107 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4108 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4109 login_buffer->off_rxcomp_subcrqs =
4110 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4111 sizeof(u64) * adapter->req_tx_queues);
4112 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4113 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4114
4115 tx_list_p = (__be64 *)((char *)login_buffer +
4116 sizeof(struct ibmvnic_login_buffer));
4117 rx_list_p = (__be64 *)((char *)login_buffer +
4118 sizeof(struct ibmvnic_login_buffer) +
4119 sizeof(u64) * adapter->req_tx_queues);
4120
4121 for (i = 0; i < adapter->req_tx_queues; i++) {
4122 if (adapter->tx_scrq[i]) {
Lijun Pan914789a2021-02-11 00:43:21 -06004123 tx_list_p[i] =
4124 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004125 }
4126 }
4127
4128 for (i = 0; i < adapter->req_rx_queues; i++) {
4129 if (adapter->rx_scrq[i]) {
Lijun Pan914789a2021-02-11 00:43:21 -06004130 rx_list_p[i] =
4131 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004132 }
4133 }
4134
Nathan Fontenot37798d02017-11-08 11:23:56 -06004135 /* Insert vNIC login client data */
4136 vlcd = (struct vnic_login_client_data *)
4137 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4138 login_buffer->client_data_offset =
4139 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4140 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4141
4142 vnic_add_client_data(adapter, vlcd);
4143
Thomas Falcon032c5e82015-12-21 11:26:06 -06004144 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4145 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4146 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004147 ((unsigned long *)(adapter->login_buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004148 }
4149
4150 memset(&crq, 0, sizeof(crq));
4151 crq.login.first = IBMVNIC_CRQ_CMD;
4152 crq.login.cmd = LOGIN;
4153 crq.login.ioba = cpu_to_be32(buffer_token);
4154 crq.login.len = cpu_to_be32(buffer_size);
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06004155
4156 adapter->login_pending = true;
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004157 rc = ibmvnic_send_crq(adapter, &crq);
4158 if (rc) {
4159 adapter->login_pending = false;
4160 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4161 goto buf_rsp_map_failed;
4162 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004163
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004164 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004165
Thomas Falcon032c5e82015-12-21 11:26:06 -06004166buf_rsp_map_failed:
4167 kfree(login_rsp_buffer);
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004168 adapter->login_rsp_buf = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004169buf_rsp_alloc_failed:
4170 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4171buf_map_failed:
4172 kfree(login_buffer);
Dany Maddenc98d9cc2020-11-25 18:04:30 -06004173 adapter->login_buf = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004174buf_alloc_failed:
Thomas Falcon20a8ab72018-02-26 18:10:59 -06004175 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004176}
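/* Editor's note: illustrative layout (hypothetical queue counts) of the login
 * buffer built by send_login() above, for req_tx_queues = 2 and
 * req_rx_queues = 3:
 *
 *	offset 0		struct ibmvnic_login_buffer header
 *	off_txcomp_subcrqs	2 x __be64 tx sub-CRQ numbers
 *	off_rxcomp_subcrqs	3 x __be64 rx sub-CRQ numbers
 *	client_data_offset	vnic_client_data_len() bytes of
 *				{type, len, name} records
 *
 * All offsets are byte offsets from the start of the buffer, which is what
 * the cpu_to_be32((char *)vlcd - (char *)login_buffer) computation encodes.
 */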
4177
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004178static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4179 u32 len, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004180{
4181 union ibmvnic_crq crq;
4182
4183 memset(&crq, 0, sizeof(crq));
4184 crq.request_map.first = IBMVNIC_CRQ_CMD;
4185 crq.request_map.cmd = REQUEST_MAP;
4186 crq.request_map.map_id = map_id;
4187 crq.request_map.ioba = cpu_to_be32(addr);
4188 crq.request_map.len = cpu_to_be32(len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004189 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004190}
4191
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004192static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004193{
4194 union ibmvnic_crq crq;
4195
4196 memset(&crq, 0, sizeof(crq));
4197 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4198 crq.request_unmap.cmd = REQUEST_UNMAP;
4199 crq.request_unmap.map_id = map_id;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05004200 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004201}
4202
Lijun Pan69980d02020-09-27 20:13:28 -05004203static void send_query_map(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004204{
4205 union ibmvnic_crq crq;
4206
4207 memset(&crq, 0, sizeof(crq));
4208 crq.query_map.first = IBMVNIC_CRQ_CMD;
4209 crq.query_map.cmd = QUERY_MAP;
4210 ibmvnic_send_crq(adapter, &crq);
4211}
4212
4213/* Send a series of CRQs requesting various capabilities of the VNIC server */
Lijun Pan491099a2020-09-27 20:13:26 -05004214static void send_query_cap(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004215{
4216 union ibmvnic_crq crq;
4217
Thomas Falcon901e0402017-02-15 12:17:59 -06004218 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004219 memset(&crq, 0, sizeof(crq));
4220 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4221 crq.query_capability.cmd = QUERY_CAPABILITY;
4222
4223 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004224 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004225 ibmvnic_send_crq(adapter, &crq);
4226
4227 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004228 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004229 ibmvnic_send_crq(adapter, &crq);
4230
4231 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004232 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004233 ibmvnic_send_crq(adapter, &crq);
4234
4235 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004236 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004237 ibmvnic_send_crq(adapter, &crq);
4238
4239 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004240 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004241 ibmvnic_send_crq(adapter, &crq);
4242
4243 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004244 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004245 ibmvnic_send_crq(adapter, &crq);
4246
4247 crq.query_capability.capability =
4248 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004249 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004250 ibmvnic_send_crq(adapter, &crq);
4251
4252 crq.query_capability.capability =
4253 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004254 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004255 ibmvnic_send_crq(adapter, &crq);
4256
4257 crq.query_capability.capability =
4258 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004259 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004260 ibmvnic_send_crq(adapter, &crq);
4261
4262 crq.query_capability.capability =
4263 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004264 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004265 ibmvnic_send_crq(adapter, &crq);
4266
4267 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06004268 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004269 ibmvnic_send_crq(adapter, &crq);
4270
4271 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06004272 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004273 ibmvnic_send_crq(adapter, &crq);
4274
4275 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06004276 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004277 ibmvnic_send_crq(adapter, &crq);
4278
4279 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06004280 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004281 ibmvnic_send_crq(adapter, &crq);
4282
4283 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06004284 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004285 ibmvnic_send_crq(adapter, &crq);
4286
4287 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06004288 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004289 ibmvnic_send_crq(adapter, &crq);
4290
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004291 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4292 atomic_inc(&adapter->running_cap_crqs);
4293 ibmvnic_send_crq(adapter, &crq);
4294
Thomas Falcon032c5e82015-12-21 11:26:06 -06004295 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004296 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004297 ibmvnic_send_crq(adapter, &crq);
4298
4299 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06004300 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004301 ibmvnic_send_crq(adapter, &crq);
4302
4303 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004304 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004305 ibmvnic_send_crq(adapter, &crq);
4306
4307 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004308 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004309 ibmvnic_send_crq(adapter, &crq);
4310
4311 crq.query_capability.capability =
4312 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06004313 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004314 ibmvnic_send_crq(adapter, &crq);
4315
4316 crq.query_capability.capability =
4317 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004318 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004319 ibmvnic_send_crq(adapter, &crq);
4320
4321 crq.query_capability.capability =
4322 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004323 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004324 ibmvnic_send_crq(adapter, &crq);
4325
4326 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004327 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004328 ibmvnic_send_crq(adapter, &crq);
4329}
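/* Editor's note: sketch of the bookkeeping used above, not new driver code.
 * Each capability query increments adapter->running_cap_crqs immediately
 * before the CRQ is sent, and handle_query_cap_rsp() decrements it once per
 * response; when the counter reads zero again all answers are in and the
 * driver moves on to send_request_cap(adapter, 0). In outline:
 *
 *	atomic_set(&adapter->running_cap_crqs, 0);
 *	for (each capability to query) {
 *		atomic_inc(&adapter->running_cap_crqs);
 *		ibmvnic_send_crq(adapter, &crq);
 *	}
 *
 *	(in handle_query_cap_rsp())
 *	atomic_dec(&adapter->running_cap_crqs);
 *	...
 *	if (atomic_read(&adapter->running_cap_crqs) == 0)
 *		send_request_cap(adapter, 0);
 */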
4330
Lijun Pan16e811f2020-09-27 20:13:29 -05004331static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4332{
4333 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4334 struct device *dev = &adapter->vdev->dev;
4335 union ibmvnic_crq crq;
4336
4337 adapter->ip_offload_tok =
4338 dma_map_single(dev,
4339 &adapter->ip_offload_buf,
4340 buf_sz,
4341 DMA_FROM_DEVICE);
4342
4343 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4344 if (!firmware_has_feature(FW_FEATURE_CMO))
4345 dev_err(dev, "Couldn't map offload buffer\n");
4346 return;
4347 }
4348
4349 memset(&crq, 0, sizeof(crq));
4350 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4351 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4352 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4353 crq.query_ip_offload.ioba =
4354 cpu_to_be32(adapter->ip_offload_tok);
4355
4356 ibmvnic_send_crq(adapter, &crq);
4357}
4358
Lijun Pan46899bd2020-09-27 20:13:30 -05004359static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4360{
4361 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4362 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4363 struct device *dev = &adapter->vdev->dev;
4364 netdev_features_t old_hw_features = 0;
4365 union ibmvnic_crq crq;
4366
4367 adapter->ip_offload_ctrl_tok =
4368 dma_map_single(dev,
4369 ctrl_buf,
4370 sizeof(adapter->ip_offload_ctrl),
4371 DMA_TO_DEVICE);
4372
4373 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4374 dev_err(dev, "Couldn't map ip offload control buffer\n");
4375 return;
4376 }
4377
4378 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4379 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4380 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4381 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4382 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4383 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4384 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4385 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4386 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4387 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4388
4389 /* large_rx disabled for now, additional features needed */
4390 ctrl_buf->large_rx_ipv4 = 0;
4391 ctrl_buf->large_rx_ipv6 = 0;
4392
4393 if (adapter->state != VNIC_PROBING) {
4394 old_hw_features = adapter->netdev->hw_features;
4395 adapter->netdev->hw_features = 0;
4396 }
4397
4398 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4399
4400 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4401 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4402
4403 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4404 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4405
4406 if ((adapter->netdev->features &
4407 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4408 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4409
4410 if (buf->large_tx_ipv4)
4411 adapter->netdev->hw_features |= NETIF_F_TSO;
4412 if (buf->large_tx_ipv6)
4413 adapter->netdev->hw_features |= NETIF_F_TSO6;
4414
4415 if (adapter->state == VNIC_PROBING) {
4416 adapter->netdev->features |= adapter->netdev->hw_features;
4417 } else if (old_hw_features != adapter->netdev->hw_features) {
4418 netdev_features_t tmp = 0;
4419
4420 /* disable features no longer supported */
4421 adapter->netdev->features &= adapter->netdev->hw_features;
4422 /* turn on features now supported if previously enabled */
4423 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4424 adapter->netdev->hw_features;
4425 adapter->netdev->features |=
4426 tmp & adapter->netdev->wanted_features;
4427 }
4428
4429 memset(&crq, 0, sizeof(crq));
4430 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4431 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4432 crq.control_ip_offload.len =
4433 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4434 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4435 ibmvnic_send_crq(adapter, &crq);
4436}
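/* Editor's note: worked example (hypothetical feature masks) of the feature
 * fix-up in send_control_ip_offload() above for the non-probing case.
 * Suppose
 *
 *	old_hw_features = CSUM | TSO
 *	new hw_features = CSUM | TSO6	(TSO lost, TSO6 gained)
 *	wanted_features = CSUM | TSO | TSO6
 *
 * Then features &= hw_features drops TSO, and
 *
 *	tmp = (old_hw_features ^ hw_features) & hw_features = TSO6
 *	features |= tmp & wanted_features  ->  features = CSUM | TSO6
 *
 * so an offload that only now became available is enabled automatically,
 * but only because user space had already asked for it.
 */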
4437
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004438static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4439 struct ibmvnic_adapter *adapter)
4440{
4441 struct device *dev = &adapter->vdev->dev;
4442
4443 if (crq->get_vpd_size_rsp.rc.code) {
4444 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4445 crq->get_vpd_size_rsp.rc.code);
4446 complete(&adapter->fw_done);
4447 return;
4448 }
4449
4450 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4451 complete(&adapter->fw_done);
4452}
4453
4454static void handle_vpd_rsp(union ibmvnic_crq *crq,
4455 struct ibmvnic_adapter *adapter)
4456{
4457 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004458 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004459 u8 fw_level_len = 0;
4460
4461 memset(adapter->fw_version, 0, 32);
4462
4463 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4464 DMA_FROM_DEVICE);
4465
4466 if (crq->get_vpd_rsp.rc.code) {
4467 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4468 crq->get_vpd_rsp.rc.code);
4469 goto complete;
4470 }
4471
4472 /* get the position of the firmware version info
4473 * located after the ASCII 'RM' substring in the buffer
4474 */
4475 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4476 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02004477 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004478 goto complete;
4479 }
4480
4481 /* get length of firmware level ASCII substring */
4482 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4483 fw_level_len = *(substr + 2);
4484 } else {
4485 		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
4486 goto complete;
4487 }
4488
4489 /* copy firmware version string from vpd into adapter */
4490 if ((substr + 3 + fw_level_len) <
4491 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004492 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004493 } else {
4494 dev_info(dev, "FW substr extrapolated VPD buff\n");
4495 }
4496
4497complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004498 if (adapter->fw_version[0] == '\0')
Lijun Pan0b217d32021-06-11 13:33:53 -05004499 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004500 complete(&adapter->fw_done);
4501}
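/* Editor's note: worked example (hypothetical VPD contents) of the firmware
 * level parsing in handle_vpd_rsp() above. If the VPD buffer contains the
 * byte sequence
 *
 *	... 'R' 'M' 0x05 'F' 'W' '9' '5' '0' ...
 *
 * then substr points at 'R', fw_level_len = *(substr + 2) = 5, and the five
 * bytes starting at substr + 3 ("FW950") are copied into
 * adapter->fw_version. If no "RM" keyword is found, or the length would run
 * past the end of the buffer, fw_version is left empty and falls back to
 * "N/A" at the complete label.
 */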
4502
Thomas Falcon032c5e82015-12-21 11:26:06 -06004503static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4504{
4505 struct device *dev = &adapter->vdev->dev;
4506 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004507 int i;
4508
4509 dma_unmap_single(dev, adapter->ip_offload_tok,
4510 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4511
4512 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4513 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4514 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004515 ((unsigned long *)(buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004516
4517 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4518 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4519 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4520 buf->tcp_ipv4_chksum);
4521 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4522 buf->tcp_ipv6_chksum);
4523 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4524 buf->udp_ipv4_chksum);
4525 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4526 buf->udp_ipv6_chksum);
4527 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4528 buf->large_tx_ipv4);
4529 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4530 buf->large_tx_ipv6);
4531 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4532 buf->large_rx_ipv4);
4533 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4534 buf->large_rx_ipv6);
4535 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4536 buf->max_ipv4_header_size);
4537 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4538 buf->max_ipv6_header_size);
4539 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4540 buf->max_tcp_header_size);
4541 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4542 buf->max_udp_header_size);
4543 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4544 buf->max_large_tx_size);
4545 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4546 buf->max_large_rx_size);
4547 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4548 buf->ipv6_extension_header);
4549 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4550 buf->tcp_pseudosum_req);
4551 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4552 buf->num_ipv6_ext_headers);
4553 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4554 buf->off_ipv6_ext_headers);
4555
Lijun Pan46899bd2020-09-27 20:13:30 -05004556 send_control_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004557}
4558
Thomas Falconc9008d32018-08-06 21:39:59 -05004559static const char *ibmvnic_fw_err_cause(u16 cause)
4560{
4561 switch (cause) {
4562 case ADAPTER_PROBLEM:
4563 return "adapter problem";
4564 case BUS_PROBLEM:
4565 return "bus problem";
4566 case FW_PROBLEM:
4567 return "firmware problem";
4568 case DD_PROBLEM:
4569 return "device driver problem";
4570 case EEH_RECOVERY:
4571 return "EEH recovery";
4572 case FW_UPDATED:
4573 return "firmware updated";
4574 case LOW_MEMORY:
4575 return "low Memory";
4576 default:
4577 return "unknown";
4578 }
4579}
4580
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004581static void handle_error_indication(union ibmvnic_crq *crq,
4582 struct ibmvnic_adapter *adapter)
4583{
4584 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004585 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004586
Thomas Falconc9008d32018-08-06 21:39:59 -05004587 cause = be16_to_cpu(crq->error_indication.error_cause);
4588
4589 dev_warn_ratelimited(dev,
4590 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4591 crq->error_indication.flags
4592 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4593 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004594
Nathan Fontenoted651a12017-05-03 14:04:38 -04004595 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4596 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004597 else
4598 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004599}
4600
Thomas Falconf8136142018-01-29 13:45:05 -06004601static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4602 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004603{
4604 struct net_device *netdev = adapter->netdev;
4605 struct device *dev = &adapter->vdev->dev;
4606 long rc;
4607
4608 rc = crq->change_mac_addr_rsp.rc.code;
4609 if (rc) {
4610 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004611 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004612 }
Lijun Pand9b0e592020-10-20 17:39:19 -05004613 /* crq->change_mac_addr.mac_addr is the requested one
4614 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4615 */
Thomas Falcon62740e92019-05-09 23:13:43 -05004616 ether_addr_copy(netdev->dev_addr,
4617 &crq->change_mac_addr_rsp.mac_addr[0]);
Lijun Pand9b0e592020-10-20 17:39:19 -05004618 ether_addr_copy(adapter->mac_addr,
4619 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004620out:
4621 complete(&adapter->fw_done);
4622 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004623}
4624
4625static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4626 struct ibmvnic_adapter *adapter)
4627{
4628 struct device *dev = &adapter->vdev->dev;
4629 u64 *req_value;
4630 char *name;
4631
Thomas Falcon901e0402017-02-15 12:17:59 -06004632 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004633 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4634 case REQ_TX_QUEUES:
4635 req_value = &adapter->req_tx_queues;
4636 name = "tx";
4637 break;
4638 case REQ_RX_QUEUES:
4639 req_value = &adapter->req_rx_queues;
4640 name = "rx";
4641 break;
4642 case REQ_RX_ADD_QUEUES:
4643 req_value = &adapter->req_rx_add_queues;
4644 name = "rx_add";
4645 break;
4646 case REQ_TX_ENTRIES_PER_SUBCRQ:
4647 req_value = &adapter->req_tx_entries_per_subcrq;
4648 name = "tx_entries_per_subcrq";
4649 break;
4650 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4651 req_value = &adapter->req_rx_add_entries_per_subcrq;
4652 name = "rx_add_entries_per_subcrq";
4653 break;
4654 case REQ_MTU:
4655 req_value = &adapter->req_mtu;
4656 name = "mtu";
4657 break;
4658 case PROMISC_REQUESTED:
4659 req_value = &adapter->promisc;
4660 name = "promisc";
4661 break;
4662 default:
4663 dev_err(dev, "Got invalid cap request rsp %d\n",
4664 crq->request_capability.capability);
4665 return;
4666 }
4667
4668 switch (crq->request_capability_rsp.rc.code) {
4669 case SUCCESS:
4670 break;
4671 case PARTIALSUCCESS:
4672 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4673 *req_value,
Lijun Pan914789a2021-02-11 00:43:21 -06004674 (long)be64_to_cpu(crq->request_capability_rsp.number),
4675 name);
John Allene7913802018-01-18 16:27:12 -06004676
4677 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4678 REQ_MTU) {
4679 pr_err("mtu of %llu is not supported. Reverting.\n",
4680 *req_value);
4681 *req_value = adapter->fallback.mtu;
4682 } else {
4683 *req_value =
4684 be64_to_cpu(crq->request_capability_rsp.number);
4685 }
4686
Lijun Pan09081b92020-09-27 20:13:27 -05004687 send_request_cap(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004688 return;
4689 default:
4690 dev_err(dev, "Error %d in request cap rsp\n",
4691 crq->request_capability_rsp.rc.code);
4692 return;
4693 }
4694
4695 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004696 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon249168a2017-02-15 12:18:00 -06004697 adapter->wait_capability = false;
Lijun Pan16e811f2020-09-27 20:13:29 -05004698 send_query_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004699 }
4700}
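/* Editor's note: hypothetical example of the PARTIALSUCCESS path handled
 * above. If the driver asked for req_tx_queues = 16 but the server can only
 * grant 8, the REQUEST_CAPABILITY response arrives with rc = PARTIALSUCCESS
 * and number = 8; handle_request_cap_rsp() then overwrites req_tx_queues
 * with 8 (or, for REQ_MTU, reverts to adapter->fallback.mtu) and calls
 * send_request_cap(adapter, 1) to resend the whole request set with the
 * adjusted values.
 */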
4701
4702static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4703 struct ibmvnic_adapter *adapter)
4704{
4705 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004706 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004707 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4708 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004709 u64 *tx_handle_array;
4710 u64 *rx_handle_array;
4711 int num_tx_pools;
4712 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004713 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004714 int i;
4715
Sukadev Bhattiprolu76cdc5c2020-11-25 18:04:29 -06004716 /* CHECK: Test/set of login_pending does not need to be atomic
4717 * because only ibmvnic_tasklet tests/clears this.
4718 */
4719 if (!adapter->login_pending) {
4720 netdev_warn(netdev, "Ignoring unexpected login response\n");
4721 return 0;
4722 }
4723 adapter->login_pending = false;
4724
Thomas Falcon032c5e82015-12-21 11:26:06 -06004725 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004726 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004727 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004728 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004729
John Allen498cd8e2016-04-06 11:49:55 -05004730 /* If the number of queues requested can't be allocated by the
4731 * server, the login response will return with code 1. We will need
4732 * to resend the login buffer with fewer queues requested.
4733 */
4734 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004735 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004736 complete(&adapter->init_done);
4737 return 0;
4738 }
4739
John Allenc26eba02017-10-26 16:23:25 -05004740 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4741
Thomas Falcon032c5e82015-12-21 11:26:06 -06004742 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4743 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4744 netdev_dbg(adapter->netdev, "%016lx\n",
Lijun Pan429aa362021-02-11 00:43:18 -06004745 ((unsigned long *)(adapter->login_rsp_buf))[i]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004746 }
4747
4748 /* Sanity checks */
4749 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4750 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4751 adapter->req_rx_add_queues !=
4752 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4753 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
Dany Madden31d6b402020-11-25 18:04:24 -06004754 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004755 return -EIO;
4756 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004757 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4758 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4759 /* variable buffer sizes are not supported, so just read the
4760 * first entry.
4761 */
4762 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004763
4764 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4765 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4766
4767 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4768 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4769 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4770 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4771
4772 for (i = 0; i < num_tx_pools; i++)
4773 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4774
4775 for (i = 0; i < num_rx_pools; i++)
4776 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4777
Thomas Falcon507ebe62020-08-21 13:39:01 -05004778 adapter->num_active_tx_scrqs = num_tx_pools;
4779 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004780 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004781 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004782 complete(&adapter->init_done);
4783
Thomas Falcon032c5e82015-12-21 11:26:06 -06004784 return 0;
4785}
4786
Thomas Falcon032c5e82015-12-21 11:26:06 -06004787static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4788 struct ibmvnic_adapter *adapter)
4789{
4790 struct device *dev = &adapter->vdev->dev;
4791 long rc;
4792
4793 rc = crq->request_unmap_rsp.rc.code;
4794 if (rc)
4795 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4796}
4797
4798static void handle_query_map_rsp(union ibmvnic_crq *crq,
4799 struct ibmvnic_adapter *adapter)
4800{
4801 struct net_device *netdev = adapter->netdev;
4802 struct device *dev = &adapter->vdev->dev;
4803 long rc;
4804
4805 rc = crq->query_map_rsp.rc.code;
4806 if (rc) {
4807 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4808 return;
4809 }
Sukadev Bhattiprolu0f2bf312021-09-14 20:52:52 -07004810 netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
4811 crq->query_map_rsp.page_size,
4812 __be32_to_cpu(crq->query_map_rsp.tot_pages),
4813 __be32_to_cpu(crq->query_map_rsp.free_pages));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004814}
4815
4816static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4817 struct ibmvnic_adapter *adapter)
4818{
4819 struct net_device *netdev = adapter->netdev;
4820 struct device *dev = &adapter->vdev->dev;
4821 long rc;
4822
Thomas Falcon901e0402017-02-15 12:17:59 -06004823 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004824 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004825 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004826 rc = crq->query_capability.rc.code;
4827 if (rc) {
4828 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4829 goto out;
4830 }
4831
4832 switch (be16_to_cpu(crq->query_capability.capability)) {
4833 case MIN_TX_QUEUES:
4834 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004835 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004836 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4837 adapter->min_tx_queues);
4838 break;
4839 case MIN_RX_QUEUES:
4840 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004841 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004842 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4843 adapter->min_rx_queues);
4844 break;
4845 case MIN_RX_ADD_QUEUES:
4846 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004847 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004848 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4849 adapter->min_rx_add_queues);
4850 break;
4851 case MAX_TX_QUEUES:
4852 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004853 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004854 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4855 adapter->max_tx_queues);
4856 break;
4857 case MAX_RX_QUEUES:
4858 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004859 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004860 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4861 adapter->max_rx_queues);
4862 break;
4863 case MAX_RX_ADD_QUEUES:
4864 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004865 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004866 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4867 adapter->max_rx_add_queues);
4868 break;
4869 case MIN_TX_ENTRIES_PER_SUBCRQ:
4870 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004871 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004872 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4873 adapter->min_tx_entries_per_subcrq);
4874 break;
4875 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4876 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004877 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004878 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4879 adapter->min_rx_add_entries_per_subcrq);
4880 break;
4881 case MAX_TX_ENTRIES_PER_SUBCRQ:
4882 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004883 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004884 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4885 adapter->max_tx_entries_per_subcrq);
4886 break;
4887 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4888 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004889 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004890 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4891 adapter->max_rx_add_entries_per_subcrq);
4892 break;
4893 case TCP_IP_OFFLOAD:
4894 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06004895 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004896 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4897 adapter->tcp_ip_offload);
4898 break;
4899 case PROMISC_SUPPORTED:
4900 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004901 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004902 netdev_dbg(netdev, "promisc_supported = %lld\n",
4903 adapter->promisc_supported);
4904 break;
4905 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004906 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004907 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004908 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4909 break;
4910 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004911 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004912 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004913 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4914 break;
4915 case MAX_MULTICAST_FILTERS:
4916 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004917 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004918 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4919 adapter->max_multicast_filters);
4920 break;
4921 case VLAN_HEADER_INSERTION:
4922 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004923 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004924 if (adapter->vlan_header_insertion)
4925 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4926 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4927 adapter->vlan_header_insertion);
4928 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004929 case RX_VLAN_HEADER_INSERTION:
4930 adapter->rx_vlan_header_insertion =
4931 be64_to_cpu(crq->query_capability.number);
4932 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4933 adapter->rx_vlan_header_insertion);
4934 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004935 case MAX_TX_SG_ENTRIES:
4936 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004937 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004938 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4939 adapter->max_tx_sg_entries);
4940 break;
4941 case RX_SG_SUPPORTED:
4942 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004943 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004944 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4945 adapter->rx_sg_supported);
4946 break;
4947 case OPT_TX_COMP_SUB_QUEUES:
4948 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004949 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004950 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4951 adapter->opt_tx_comp_sub_queues);
4952 break;
4953 case OPT_RX_COMP_QUEUES:
4954 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004955 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004956 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4957 adapter->opt_rx_comp_queues);
4958 break;
4959 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4960 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004961 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004962 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4963 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4964 break;
4965 case OPT_TX_ENTRIES_PER_SUBCRQ:
4966 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004967 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004968 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4969 adapter->opt_tx_entries_per_subcrq);
4970 break;
4971 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4972 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004973 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004974 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4975 adapter->opt_rxba_entries_per_subcrq);
4976 break;
4977 case TX_RX_DESC_REQ:
4978 adapter->tx_rx_desc_req = crq->query_capability.number;
4979 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4980 adapter->tx_rx_desc_req);
4981 break;
4982
4983 default:
4984 netdev_err(netdev, "Got invalid cap rsp %d\n",
4985 crq->query_capability.capability);
4986 }
4987
4988out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004989 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4990 adapter->wait_capability = false;
Lijun Pan09081b92020-09-27 20:13:27 -05004991 send_request_cap(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004992 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004993}
4994
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004995static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4996{
4997 union ibmvnic_crq crq;
4998 int rc;
4999
5000 memset(&crq, 0, sizeof(crq));
5001 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5002 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06005003
5004 mutex_lock(&adapter->fw_lock);
5005 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06005006 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005007
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005008 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005009 if (rc) {
5010 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005011 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06005012 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06005013
5014 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005015 if (rc) {
5016 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06005017 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06005018 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06005019
Thomas Falconff25dcb2019-11-25 17:12:56 -06005020 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005021 return adapter->fw_done_rc ? -EIO : 0;
5022}
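/* Editor's note: sketch (not new driver code) of the synchronous-command
 * pattern used by send_query_phys_parms() above. CRQ commands complete
 * asynchronously in ibmvnic_handle_crq(), so a caller that needs the answer
 * inline does roughly:
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = ibmvnic_send_crq(adapter, &crq);
 *	if (!rc)
 *		rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done,
 *						 10000);
 *	mutex_unlock(&adapter->fw_lock);
 *
 * while the matching response handler fills adapter->fw_done_rc and calls
 * complete(&adapter->fw_done).
 */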
5023
5024static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5025 struct ibmvnic_adapter *adapter)
5026{
5027 struct net_device *netdev = adapter->netdev;
5028 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03005029 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005030
5031 rc = crq->query_phys_parms_rsp.rc.code;
5032 if (rc) {
5033 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5034 return rc;
5035 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03005036 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005037 case IBMVNIC_10MBPS:
5038 adapter->speed = SPEED_10;
5039 break;
5040 case IBMVNIC_100MBPS:
5041 adapter->speed = SPEED_100;
5042 break;
5043 case IBMVNIC_1GBPS:
5044 adapter->speed = SPEED_1000;
5045 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05005046 case IBMVNIC_10GBPS:
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005047 adapter->speed = SPEED_10000;
5048 break;
5049 case IBMVNIC_25GBPS:
5050 adapter->speed = SPEED_25000;
5051 break;
5052 case IBMVNIC_40GBPS:
5053 adapter->speed = SPEED_40000;
5054 break;
5055 case IBMVNIC_50GBPS:
5056 adapter->speed = SPEED_50000;
5057 break;
5058 case IBMVNIC_100GBPS:
5059 adapter->speed = SPEED_100000;
5060 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05005061 case IBMVNIC_200GBPS:
5062 adapter->speed = SPEED_200000;
5063 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005064 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03005065 if (netif_carrier_ok(netdev))
5066 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03005067 adapter->speed = SPEED_UNKNOWN;
5068 }
5069 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5070 adapter->duplex = DUPLEX_FULL;
5071 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5072 adapter->duplex = DUPLEX_HALF;
5073 else
5074 adapter->duplex = DUPLEX_UNKNOWN;
5075
5076 return rc;
5077}
5078
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)cpu_to_be64(u64_crq[0]),
		   (unsigned long)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * being removed.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

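/* CRQ interrupt handler: defer all message processing to the tasklet. */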
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

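/* Drain the main CRQ under the queue lock, handling each valid message in
 * arrival order.
 */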
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

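/* Retry H_ENABLE_CRQ until the hypervisor stops reporting busy or
 * in-progress, then report any final error.
 */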
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

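/* Close, clear and re-register the main CRQ with the hypervisor without
 * freeing the long term mapped message page.
 */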
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

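/* Tear down the main CRQ: free the irq, stop the tasklet, deregister the
 * queue from the hypervisor and release the DMA mapped message page.
 */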
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

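/* Allocate and register the main CRQ page with the hypervisor and hook up
 * the CRQ interrupt. A no-op (returns 0) if the queue already exists.
 */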
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

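/* Drive the CRQ initialization handshake with the VNIC server and
 * (re)build the sub-CRQs. With reset == true this runs as part of a reset
 * and reuses the existing sub-CRQs when the requested queue counts are
 * unchanged.
 */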
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset)
		reinit_completion(&adapter->init_done);

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

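/* Probe: allocate the netdev, initialize adapter state, bring up the CRQ
 * and perform the initial handshake with the server before registering
 * the net device. If the partner is not yet ready, the adapter is left in
 * VNIC_DOWN until a passive init reset arrives.
 */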
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	init_success = false;
	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

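/* Remove: mark the adapter VNIC_REMOVING so no new resets are scheduled,
 * flush any reset work already queued, then unregister the netdev and
 * release all CRQ and statistics resources.
 */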
static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

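/* sysfs "failover" attribute: writing 1 asks the hypervisor to initiate a
 * session failover via H_VIOCTL and, as a last resort, always schedules a
 * FAILOVER reset over the CRQ as well.
 */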
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc)
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}
static DEVICE_ATTR_WO(failover);

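/* Estimate the IO entitlement (DMA window space) this adapter wants: the
 * CRQ page, the statistics buffer, the sub-CRQ message queues and the
 * long term mapped receive buffer pools.
 */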
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

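/* PM resume handler: if the adapter is open, schedule the CRQ tasklet to
 * process any pending messages.
 */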
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);