// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

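/**
 * h_reg_sub_crq - Register a sub-CRQ page with the hypervisor
 * @unit_address: unit address of the VIO device
 * @token: DMA address of the queue page
 * @length: length of the queue in bytes
 * @number: on success, the identifier assigned to the new sub-CRQ
 * @irq: on success, the interrupt source assigned to the new sub-CRQ
 *
 * Thin wrapper around the H_REG_SUB_CRQ hcall that unpacks the two
 * return values from the hypervisor's return buffer.
 */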
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

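/**
 * alloc_long_term_buff - Allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: number of bytes to allocate
 *
 * Allocates a coherent DMA buffer, assigns it the adapter's next map ID,
 * and sends a REQUEST_MAP so the VNIC server can address the buffer. The
 * buffer is freed again if the map request cannot be sent, times out, or
 * is rejected by the server (reported through fw_done_rc).
 */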
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

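/**
 * free_long_term_buff - Unmap and free a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer to release
 *
 * Sends a REQUEST_UNMAP for the buffer's map ID before freeing it, except
 * during failover and mobility resets, where the driver skips the unmap
 * (the server-side mapping is presumed not to survive those events).
 */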
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

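/**
 * reset_long_term_buff - Zero and re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Clears the buffer contents and sends a fresh REQUEST_MAP for the same
 * map ID. If the server rejects the mapping, the buffer is freed and
 * reallocated from scratch via alloc_long_term_buff().
 */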
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

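/**
 * replenish_rx_pool - Post fresh rx buffers to the VNIC server
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * For every free slot in the pool, allocates an skb, points the slot at
 * its offset within the pool's long term mapped buffer, and posts an
 * rx_add sub-CRQ descriptor for that offset. On hcall failure the slot is
 * returned to the free map; if the failure indicates a closed queue or a
 * pending failover, all rx pools are deactivated and carrier is dropped
 * until the expected reset arrives.
 */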
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle, &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

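/**
 * reset_rx_pools - Prepare the existing rx pools for reuse after a reset
 * @adapter: private device data
 *
 * If the required buffer size changed across the reset, each pool's long
 * term buffer is freed and reallocated at the new size; otherwise it is
 * simply re-registered. Free maps, buffer arrays, and counters are then
 * reinitialized and the pool is marked active.
 */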
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = buff_size;
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

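/**
 * init_rx_pools - Allocate one rx pool per active rx sub-CRQ
 * @netdev: net device backed by this adapter
 *
 * Each pool gets a free map, an array of rx buffer descriptors, and a
 * long term DMA buffer large enough for every entry in the queue. Any
 * allocation failure unwinds all pools through release_rx_pools().
 */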
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = buff_size;
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

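/**
 * reset_one_tx_pool - Prepare a single tx pool for reuse after a reset
 * @adapter: private device data
 * @tx_pool: tx pool to reset
 *
 * Re-registers the pool's long term buffer, clears its tx buffer array,
 * and rewinds the free map and producer/consumer indexes.
 */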
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

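/**
 * init_tx_pools - Allocate a tx pool and a TSO pool per active tx sub-CRQ
 * @netdev: net device backed by this adapter
 *
 * Regular tx pools are sized for req_tx_entries_per_subcrq buffers of
 * req_mtu + VLAN_HLEN bytes each; the companion TSO pools use the fixed
 * IBMVNIC_TSO_BUFS and IBMVNIC_TSO_BUF_SZ dimensions. Failures unwind
 * through release_tx_pools().
 */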
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

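/**
 * ibmvnic_login - Log in to the VNIC server
 * @netdev: net device backed by this adapter
 *
 * Sends a LOGIN CRQ and waits for the response, retrying up to ten times.
 * A timeout or an ABORTED response simply retries; a PARTIALSUCCESS
 * response means the server wants a different set of capabilities, so the
 * sub-CRQs are released and renegotiated before the next attempt. On
 * success the MAC address is (re)programmed.
 */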
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

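/**
 * set_link_state - Set the logical link state of the adapter
 * @adapter: private device data
 * @link_state: IBMVNIC_LOGICAL_LNK_UP or IBMVNIC_LOGICAL_LNK_DN
 *
 * Issues a LOGICAL_LINK_STATE CRQ and waits for the response. A partial
 * success is retried after a one second delay; any other nonzero
 * completion code is returned to the caller.
 */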
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

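/**
 * ibmvnic_get_vpd - Retrieve Vital Product Data from the VNIC server
 * @adapter: private device data
 *
 * Queries the VPD size with GET_VPD_SIZE, (re)allocates and DMA maps a
 * buffer of that size, then fetches the contents with GET_VPD. Both
 * round trips are serialized against other firmware requests with
 * fw_lock and the fw_done completion.
 */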
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

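/**
 * __ibmvnic_open - Bring the interface up once resources are in place
 * @netdev: net device backed by this adapter
 *
 * Replenishes the rx pools, enables NAPI and the sub-CRQ interrupts, and
 * asks the server to raise the logical link. When reopening a device that
 * was previously closed (rather than freshly probed), the queue irqs are
 * re-enabled and NAPI is scheduled to drain anything that arrived while
 * the device was closed.
 */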
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/*
	 * If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

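/**
 * clean_rx_pools - Free any skbs still held by the rx buffer pools
 * @adapter: private device data
 *
 * Used on teardown paths where posted rx buffers will never be returned
 * by the server; releases every skb still recorded in each pool.
 */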
Thomas Falcond0869c02018-02-13 18:23:43 -06001212static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1213{
1214 struct ibmvnic_rx_pool *rx_pool;
Thomas Falcon637f81d2018-02-26 18:10:57 -06001215 struct ibmvnic_rx_buff *rx_buff;
Thomas Falcond0869c02018-02-13 18:23:43 -06001216 u64 rx_entries;
1217 int rx_scrqs;
1218 int i, j;
1219
1220 if (!adapter->rx_pool)
1221 return;
1222
Thomas Falcon660e3092018-04-20 14:25:32 -05001223 rx_scrqs = adapter->num_active_rx_pools;
Thomas Falcond0869c02018-02-13 18:23:43 -06001224 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1225
1226 /* Free any remaining skbs in the rx buffer pools */
1227 for (i = 0; i < rx_scrqs; i++) {
1228 rx_pool = &adapter->rx_pool[i];
Thomas Falcon637f81d2018-02-26 18:10:57 -06001229 if (!rx_pool || !rx_pool->rx_buff)
Thomas Falcond0869c02018-02-13 18:23:43 -06001230 continue;
1231
1232 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1233 for (j = 0; j < rx_entries; j++) {
Thomas Falcon637f81d2018-02-26 18:10:57 -06001234 rx_buff = &rx_pool->rx_buff[j];
1235 if (rx_buff && rx_buff->skb) {
1236 dev_kfree_skb_any(rx_buff->skb);
1237 rx_buff->skb = NULL;
Thomas Falcond0869c02018-02-13 18:23:43 -06001238 }
1239 }
1240 }
1241}
1242
Thomas Falcone9e1e972018-03-16 20:00:30 -05001243static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1244 struct ibmvnic_tx_pool *tx_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001245{
Thomas Falcon637f81d2018-02-26 18:10:57 -06001246 struct ibmvnic_tx_buff *tx_buff;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001247 u64 tx_entries;
Thomas Falcone9e1e972018-03-16 20:00:30 -05001248 int i;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001249
Dan Carpenter050e85c2018-03-23 14:36:15 +03001250 if (!tx_pool || !tx_pool->tx_buff)
Thomas Falcone9e1e972018-03-16 20:00:30 -05001251 return;
1252
1253 tx_entries = tx_pool->num_buffers;
1254
1255 for (i = 0; i < tx_entries; i++) {
1256 tx_buff = &tx_pool->tx_buff[i];
1257 if (tx_buff && tx_buff->skb) {
1258 dev_kfree_skb_any(tx_buff->skb);
1259 tx_buff->skb = NULL;
1260 }
1261 }
1262}
1263
1264static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1265{
1266 int tx_scrqs;
1267 int i;
1268
1269 if (!adapter->tx_pool || !adapter->tso_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001270 return;
1271
Thomas Falcon660e3092018-04-20 14:25:32 -05001272 tx_scrqs = adapter->num_active_tx_pools;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001273
1274 /* Free any remaining skbs in the tx buffer pools */
1275 for (i = 0; i < tx_scrqs; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001276 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
Thomas Falcone9e1e972018-03-16 20:00:30 -05001277 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1278 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001279 }
1280}
1281
John Allen6095e592018-03-30 13:44:21 -05001282static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
John Allenea5509f2017-03-17 17:13:43 -05001283{
John Allen6095e592018-03-30 13:44:21 -05001284 struct net_device *netdev = adapter->netdev;
John Allenea5509f2017-03-17 17:13:43 -05001285 int i;
1286
Nathan Fontenot46293b92017-05-03 14:05:02 -04001287 if (adapter->tx_scrq) {
1288 for (i = 0; i < adapter->req_tx_queues; i++)
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001289 if (adapter->tx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001290 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001291 "Disabling tx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001292 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001293 disable_irq(adapter->tx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001294 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001295 }
1296
Nathan Fontenot46293b92017-05-03 14:05:02 -04001297 if (adapter->rx_scrq) {
1298 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001299 if (adapter->rx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001300 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001301 "Disabling rx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001302 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001303 disable_irq(adapter->rx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001304 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001305 }
1306 }
John Allen6095e592018-03-30 13:44:21 -05001307}
1308
1309static void ibmvnic_cleanup(struct net_device *netdev)
1310{
1311 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1312
1313 /* ensure that transmissions are stopped if called by do_reset */
Juliet Kim7ed5b312019-09-20 16:11:23 -04001314 if (test_bit(0, &adapter->resetting))
John Allen6095e592018-03-30 13:44:21 -05001315 netif_tx_disable(netdev);
1316 else
1317 netif_tx_stop_all_queues(netdev);
1318
1319 ibmvnic_napi_disable(adapter);
1320 ibmvnic_disable_irqs(adapter);
1321
Thomas Falcond0869c02018-02-13 18:23:43 -06001322 clean_rx_pools(adapter);
Thomas Falcon10f76212017-05-26 10:30:31 -04001323 clean_tx_pools(adapter);
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001324}
1325
1326static int __ibmvnic_close(struct net_device *netdev)
1327{
1328 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1329 int rc = 0;
1330
1331 adapter->state = VNIC_CLOSING;
1332 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1333 if (rc)
1334 return rc;
Nathan Fontenot90c80142017-05-03 14:04:32 -04001335 adapter->state = VNIC_CLOSED;
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001336 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001337}
1338
Nathan Fontenoted651a12017-05-03 14:04:38 -04001339static int ibmvnic_close(struct net_device *netdev)
1340{
1341 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1342 int rc;
1343
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001344 /* If device failover is pending, just set device state and return.
 1345	 * Device operation will be handled by the reset routine.
1346 */
1347 if (adapter->failover_pending) {
1348 adapter->state = VNIC_CLOSED;
1349 return 0;
1350 }
1351
Nathan Fontenoted651a12017-05-03 14:04:38 -04001352 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001353 ibmvnic_cleanup(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001354
1355 return rc;
1356}
1357
Thomas Falconad7775d2016-04-01 17:20:34 -05001358/**
1359 * build_hdr_data - creates L2/L3/L4 header data buffer
 1360	 * @hdr_field: bitfield determining needed headers
 1361	 * @skb: socket buffer
 1362	 * @hdr_len: array of header lengths to be filled in
 1363	 * @hdr_data: buffer to write the header data into
1364 *
1365 * Reads hdr_field to determine which headers are needed by firmware.
 1366	 * Builds a buffer containing these headers. Saves individual header
 1367	 * lengths in hdr_len and returns the total length of the header data.
1368 */
1369static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1370 int *hdr_len, u8 *hdr_data)
1371{
1372 int len = 0;
1373 u8 *hdr;
1374
Thomas Falconda75e3b2018-03-12 11:51:02 -05001375 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1376 hdr_len[0] = sizeof(struct vlan_ethhdr);
1377 else
1378 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001379
1380 if (skb->protocol == htons(ETH_P_IP)) {
1381 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1382 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1383 hdr_len[2] = tcp_hdrlen(skb);
1384 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1385 hdr_len[2] = sizeof(struct udphdr);
1386 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1387 hdr_len[1] = sizeof(struct ipv6hdr);
1388 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1389 hdr_len[2] = tcp_hdrlen(skb);
1390 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1391 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001392 } else if (skb->protocol == htons(ETH_P_ARP)) {
1393 hdr_len[1] = arp_hdr_len(skb->dev);
1394 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001395 }
1396
1397 memset(hdr_data, 0, 120);
1398 if ((hdr_field >> 6) & 1) {
1399 hdr = skb_mac_header(skb);
1400 memcpy(hdr_data, hdr, hdr_len[0]);
1401 len += hdr_len[0];
1402 }
1403
1404 if ((hdr_field >> 5) & 1) {
1405 hdr = skb_network_header(skb);
1406 memcpy(hdr_data + len, hdr, hdr_len[1]);
1407 len += hdr_len[1];
1408 }
1409
1410 if ((hdr_field >> 4) & 1) {
1411 hdr = skb_transport_header(skb);
1412 memcpy(hdr_data + len, hdr, hdr_len[2]);
1413 len += hdr_len[2];
1414 }
1415 return len;
1416}
1417
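/* Illustrative example (not from a real trace): for a TCP/IPv4 skb
 * with hdr_field requesting L2, L3 and L4 headers (bits 6, 5 and 4
 * set), hdr_len would typically end up {14, 20, 20} and the function
 * returns 54, the three headers packed back-to-back in hdr_data.
 */
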
1418/**
1419 * create_hdr_descs - create header and header extension descriptors
 1420	 * @hdr_field: bitfield determining needed headers
 1421	 * @hdr_data: buffer containing header data
 1422	 * @len: length of hdr_data buffer
 1423	 * @hdr_len: array of individual header lengths
 1424	 * @scrq_arr: descriptor array
1425 *
 1426	 * Creates header and, if needed, header extension descriptors and
 1427	 * places them in scrq_arr; returns the number of descriptors created.
 1428	 */
1429
Thomas Falcon2de09682017-10-16 10:02:11 -05001430static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1431 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001432{
1433 union sub_crq hdr_desc;
1434 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001435 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001436 u8 *data, *cur;
1437 int tmp;
1438
1439 while (tmp_len > 0) {
1440 cur = hdr_data + len - tmp_len;
1441
1442 memset(&hdr_desc, 0, sizeof(hdr_desc));
1443 if (cur != hdr_data) {
1444 data = hdr_desc.hdr_ext.data;
1445 tmp = tmp_len > 29 ? 29 : tmp_len;
1446 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1447 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1448 hdr_desc.hdr_ext.len = tmp;
1449 } else {
1450 data = hdr_desc.hdr.data;
1451 tmp = tmp_len > 24 ? 24 : tmp_len;
1452 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1453 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1454 hdr_desc.hdr.len = tmp;
1455 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1456 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1457 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1458 hdr_desc.hdr.flag = hdr_field << 1;
1459 }
1460 memcpy(data, cur, tmp);
1461 tmp_len -= tmp;
1462 *scrq_arr = hdr_desc;
1463 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001464 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001465 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001466
1467 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001468}
1469
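/* Sketch of the split performed above: a 54-byte header block becomes
 * one IBMVNIC_HDR_DESC carrying the first 24 bytes plus two
 * IBMVNIC_HDR_EXT_DESC descriptors carrying 29 and 1 bytes, so the
 * function returns 3.
 */
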
1470/**
1471 * build_hdr_descs_arr - build a header descriptor array
 1472	 * @txbuff: tx buffer containing the socket buffer and the
 1473	 *	indirect descriptor array to be populated
 1474	 * @num_entries: number of descriptors to be sent, updated in place
 1475	 * @hdr_field: bit field determining which headers will be sent
1476 *
1477 * This function will build a TX descriptor array with applicable
1478 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1479 */
1480
1481static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1482 int *num_entries, u8 hdr_field)
1483{
1484 int hdr_len[3] = {0, 0, 0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001485 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001486 u8 *hdr_data = txbuff->hdr_data;
1487
1488 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1489 txbuff->hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001490 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconad7775d2016-04-01 17:20:34 -05001491 txbuff->indir_arr + 1);
1492}
1493
Thomas Falcon1f247a62018-03-12 11:51:04 -05001494static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1495 struct net_device *netdev)
1496{
1497 /* For some backing devices, mishandling of small packets
1498 * can result in a loss of connection or TX stall. Device
1499 * architects recommend that no packet should be smaller
1500 * than the minimum MTU value provided to the driver, so
 1501	 * pad any packets to that length.
1502 */
1503 if (skb->len < netdev->min_mtu)
1504 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001505
1506 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001507}
1508
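/* Main transmit path: copy the skb into the queue's long term mapped
 * buffer, build a v1 TX descriptor (plus optional header descriptors
 * for checksum/LSO offload) and hand everything to the hypervisor
 * with send_subcrq() or send_subcrq_indirect().
 */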
YueHaibing94b2bb22018-09-18 14:35:47 +08001509static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001510{
1511 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1512 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001513 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001514 struct device *dev = &adapter->vdev->dev;
1515 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001516 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001517 struct ibmvnic_tx_pool *tx_pool;
1518 unsigned int tx_send_failed = 0;
1519 unsigned int tx_map_failed = 0;
1520 unsigned int tx_dropped = 0;
1521 unsigned int tx_packets = 0;
1522 unsigned int tx_bytes = 0;
1523 dma_addr_t data_dma_addr;
1524 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001525 unsigned long lpar_rc;
1526 union sub_crq tx_crq;
1527 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001528 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001529 unsigned char *dst;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001530 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001531 u8 proto = 0;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001532 u64 handle;
YueHaibing94b2bb22018-09-18 14:35:47 +08001533 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001534
Juliet Kim7ed5b312019-09-20 16:11:23 -04001535 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001536 if (!netif_subqueue_stopped(netdev, skb))
1537 netif_stop_subqueue(netdev, queue_num);
1538 dev_kfree_skb_any(skb);
1539
Thomas Falcon032c5e82015-12-21 11:26:06 -06001540 tx_send_failed++;
1541 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001542 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001543 goto out;
1544 }
1545
Thomas Falcon7083a452018-03-12 21:05:26 -05001546 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001547 tx_dropped++;
1548 tx_send_failed++;
1549 ret = NETDEV_TX_OK;
1550 goto out;
1551 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001552 if (skb_is_gso(skb))
1553 tx_pool = &adapter->tso_pool[queue_num];
1554 else
1555 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001556
Nathan Fontenot161b8a82017-05-03 14:05:08 -04001557 tx_scrq = adapter->tx_scrq[queue_num];
1558 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001559 handle = tx_scrq->handle;
Nathan Fontenot161b8a82017-05-03 14:05:08 -04001560
Thomas Falcon032c5e82015-12-21 11:26:06 -06001561 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001562
Thomas Falcon86b61a52018-03-16 20:00:29 -05001563 if (index == IBMVNIC_INVALID_MAP) {
1564 dev_kfree_skb_any(skb);
1565 tx_send_failed++;
1566 tx_dropped++;
1567 ret = NETDEV_TX_OK;
1568 goto out;
1569 }
1570
1571 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1572
Thomas Falcon06b3e352018-03-16 20:00:28 -05001573 offset = index * tx_pool->buf_size;
1574 dst = tx_pool->long_term_buff.buff + offset;
1575 memset(dst, 0, tx_pool->buf_size);
1576 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001577
Thomas Falcon15482052017-10-17 12:36:54 -05001578 if (skb_shinfo(skb)->nr_frags) {
1579 int cur, i;
1580
1581 /* Copy the head */
1582 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1583 cur = skb_headlen(skb);
1584
1585 /* Copy the frags */
1586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1587 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1588
1589 memcpy(dst + cur,
1590 page_address(skb_frag_page(frag)) +
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07001591 skb_frag_off(frag), skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001592 cur += skb_frag_size(frag);
1593 }
1594 } else {
1595 skb_copy_from_linear_data(skb, dst, skb->len);
1596 }
1597
Thomas Falcon032c5e82015-12-21 11:26:06 -06001598 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001599 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001600
1601 tx_buff = &tx_pool->tx_buff[index];
1602 tx_buff->skb = skb;
1603 tx_buff->data_dma[0] = data_dma_addr;
1604 tx_buff->data_len[0] = skb->len;
1605 tx_buff->index = index;
1606 tx_buff->pool_index = queue_num;
1607 tx_buff->last_frag = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001608
1609 memset(&tx_crq, 0, sizeof(tx_crq));
1610 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1611 tx_crq.v1.type = IBMVNIC_TX_DESC;
1612 tx_crq.v1.n_crq_elem = 1;
1613 tx_crq.v1.n_sge = 1;
1614 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001615
Thomas Falconfdb06102017-10-17 12:36:55 -05001616 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001617 tx_crq.v1.correlator =
1618 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001619 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001620 tx_crq.v1.correlator = cpu_to_be32(index);
1621 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001622 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1623 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1624
Michał Mirosławe84b4792018-11-07 17:50:52 +01001625 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001626 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1627 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1628 }
1629
1630 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06001631 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1632 proto = ip_hdr(skb)->protocol;
1633 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1634 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1635 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001636 }
1637
Thomas Falcona0dca102018-01-18 19:29:48 -06001638 if (proto == IPPROTO_TCP)
1639 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1640 else if (proto == IPPROTO_UDP)
1641 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1642
Thomas Falconad7775d2016-04-01 17:20:34 -05001643 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001644 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05001645 hdrs += 2;
1646 }
Thomas Falconfdb06102017-10-17 12:36:55 -05001647 if (skb_is_gso(skb)) {
1648 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1649 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1650 hdrs += 2;
1651 }
Thomas Falconad7775d2016-04-01 17:20:34 -05001652 /* determine if l2/3/4 headers are sent to firmware */
John Allen2fa56a42018-02-09 13:19:46 -06001653 if ((*hdrs >> 7) & 1) {
Thomas Falconad7775d2016-04-01 17:20:34 -05001654 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1655 tx_crq.v1.n_crq_elem = num_entries;
Thomas Falconecba6162018-02-26 18:10:55 -06001656 tx_buff->num_entries = num_entries;
Thomas Falconad7775d2016-04-01 17:20:34 -05001657 tx_buff->indir_arr[0] = tx_crq;
1658 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1659 sizeof(tx_buff->indir_arr),
1660 DMA_TO_DEVICE);
1661 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001662 dev_kfree_skb_any(skb);
1663 tx_buff->skb = NULL;
Thomas Falconad7775d2016-04-01 17:20:34 -05001664 if (!firmware_has_feature(FW_FEATURE_CMO))
1665 dev_err(dev, "tx: unable to map descriptor array\n");
1666 tx_map_failed++;
1667 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001668 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001669 goto tx_err_out;
Thomas Falconad7775d2016-04-01 17:20:34 -05001670 }
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001671 lpar_rc = send_subcrq_indirect(adapter, handle,
Thomas Falconad7775d2016-04-01 17:20:34 -05001672 (u64)tx_buff->indir_dma,
1673 (u64)num_entries);
Thomas Falcon80f0fe02019-08-14 14:57:05 -05001674 dma_unmap_single(dev, tx_buff->indir_dma,
1675 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
Thomas Falconad7775d2016-04-01 17:20:34 -05001676 } else {
Thomas Falconecba6162018-02-26 18:10:55 -06001677 tx_buff->num_entries = num_entries;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001678 lpar_rc = send_subcrq(adapter, handle,
John Allen498cd8e2016-04-06 11:49:55 -05001679 &tx_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -05001680 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001681 if (lpar_rc != H_SUCCESS) {
Thomas Falcon2d14d372018-07-13 12:03:32 -05001682 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1683 dev_err_ratelimited(dev, "tx: send failed\n");
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001684 dev_kfree_skb_any(skb);
1685 tx_buff->skb = NULL;
1686
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001687 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1688 /* Disable TX and report carrier off if queue is closed
1689 * or pending failover.
Thomas Falconb8c80b82017-05-26 10:30:42 -04001690 * Firmware guarantees that a signal will be sent to the
1691 * driver, triggering a reset or some other action.
1692 */
1693 netif_tx_stop_all_queues(netdev);
1694 netif_carrier_off(netdev);
1695 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001696
Thomas Falcon032c5e82015-12-21 11:26:06 -06001697 tx_send_failed++;
1698 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001699 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001700 goto tx_err_out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001701 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001702
Thomas Falconffc385b2018-02-18 10:08:41 -06001703 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04001704 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06001705 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001706 netif_stop_subqueue(netdev, queue_num);
1707 }
1708
Thomas Falcon032c5e82015-12-21 11:26:06 -06001709 tx_packets++;
1710 tx_bytes += skb->len;
1711 txq->trans_start = jiffies;
1712 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001713 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001714
Thomas Falcon86b61a52018-03-16 20:00:29 -05001715tx_err_out:
 1716	 /* roll back consumer index and map array */
1717 if (tx_pool->consumer_index == 0)
1718 tx_pool->consumer_index =
1719 tx_pool->num_buffers - 1;
1720 else
1721 tx_pool->consumer_index--;
1722 tx_pool->free_map[tx_pool->consumer_index] = index;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001723out:
1724 netdev->stats.tx_dropped += tx_dropped;
1725 netdev->stats.tx_bytes += tx_bytes;
1726 netdev->stats.tx_packets += tx_packets;
1727 adapter->tx_send_failed += tx_send_failed;
1728 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05001729 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1730 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1731 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001732
1733 return ret;
1734}
1735
1736static void ibmvnic_set_multi(struct net_device *netdev)
1737{
1738 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1739 struct netdev_hw_addr *ha;
1740 union ibmvnic_crq crq;
1741
1742 memset(&crq, 0, sizeof(crq));
1743 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1744 crq.request_capability.cmd = REQUEST_CAPABILITY;
1745
1746 if (netdev->flags & IFF_PROMISC) {
1747 if (!adapter->promisc_supported)
1748 return;
1749 } else {
1750 if (netdev->flags & IFF_ALLMULTI) {
1751 /* Accept all multicast */
1752 memset(&crq, 0, sizeof(crq));
1753 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1754 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1755 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1756 ibmvnic_send_crq(adapter, &crq);
1757 } else if (netdev_mc_empty(netdev)) {
1758 /* Reject all multicast */
1759 memset(&crq, 0, sizeof(crq));
1760 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1761 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1762 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1763 ibmvnic_send_crq(adapter, &crq);
1764 } else {
1765 /* Accept one or more multicast(s) */
1766 netdev_for_each_mc_addr(ha, netdev) {
1767 memset(&crq, 0, sizeof(crq));
1768 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1769 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1770 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1771 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1772 ha->addr);
1773 ibmvnic_send_crq(adapter, &crq);
1774 }
1775 }
1776 }
1777}
1778
Thomas Falcon62740e92019-05-09 23:13:43 -05001779static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001780{
1781 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001782 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001783 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001784
Thomas Falcon62740e92019-05-09 23:13:43 -05001785 if (!is_valid_ether_addr(dev_addr)) {
1786 rc = -EADDRNOTAVAIL;
1787 goto err;
1788 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001789
1790 memset(&crq, 0, sizeof(crq));
1791 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1792 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05001793 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06001794
Thomas Falconff25dcb2019-11-25 17:12:56 -06001795 mutex_lock(&adapter->fw_lock);
1796 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06001797 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001798
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001799 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05001800 if (rc) {
1801 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001802 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001803 goto err;
1804 }
1805
Thomas Falcon476d96c2019-11-25 17:12:55 -06001806 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001807 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06001808 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05001809 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001810 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001811 goto err;
1812 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06001813 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001814 return 0;
1815err:
1816 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1817 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001818}
1819
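/* ndo_set_mac_address hook. Illustrative usage from userspace, with a
 * hypothetical interface name:
 *   ip link set dev net0 address 02:01:02:03:04:05
 * The new address is only pushed to firmware once the adapter has
 * left the probed state.
 */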
John Allenc26eba02017-10-26 16:23:25 -05001820static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1821{
1822 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1823 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06001824 int rc;
John Allenc26eba02017-10-26 16:23:25 -05001825
Thomas Falcon62740e92019-05-09 23:13:43 -05001826 rc = 0;
Lijun Pan8fc36722020-10-27 17:04:56 -05001827 if (!is_valid_ether_addr(addr->sa_data))
1828 return -EADDRNOTAVAIL;
1829
1830 if (adapter->state != VNIC_PROBED) {
1831 ether_addr_copy(adapter->mac_addr, addr->sa_data);
Thomas Falcon62740e92019-05-09 23:13:43 -05001832 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
Lijun Pan8fc36722020-10-27 17:04:56 -05001833 }
John Allenc26eba02017-10-26 16:23:25 -05001834
Thomas Falconf8136142018-01-29 13:45:05 -06001835 return rc;
John Allenc26eba02017-10-26 16:23:25 -05001836}
1837
Nathan Fontenoted651a12017-05-03 14:04:38 -04001838/*
Juliet Kimb27507b2019-09-20 16:11:22 -04001839 * do_change_param_reset returns zero if we are able to keep processing reset
1840 * events, or non-zero if we hit a fatal error and must halt.
1841 */
1842static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1843 struct ibmvnic_rwi *rwi,
1844 u32 reset_state)
1845{
1846 struct net_device *netdev = adapter->netdev;
1847 int i, rc;
1848
1849 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1850 rwi->reset_reason);
1851
1852 netif_carrier_off(netdev);
1853 adapter->reset_reason = rwi->reset_reason;
1854
1855 ibmvnic_cleanup(netdev);
1856
1857 if (reset_state == VNIC_OPEN) {
1858 rc = __ibmvnic_close(netdev);
1859 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06001860 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04001861 }
1862
1863 release_resources(adapter);
1864 release_sub_crqs(adapter, 1);
1865 release_crq_queue(adapter);
1866
1867 adapter->state = VNIC_PROBED;
1868
1869 rc = init_crq_queue(adapter);
1870
1871 if (rc) {
1872 netdev_err(adapter->netdev,
1873 "Couldn't initialize crq. rc=%d\n", rc);
1874 return rc;
1875 }
1876
Lijun Pan635e4422020-08-19 17:52:26 -05001877 rc = ibmvnic_reset_init(adapter, true);
Dany Madden0cb4bc62020-11-25 18:04:27 -06001878 if (rc) {
1879 rc = IBMVNIC_INIT_FAILED;
1880 goto out;
1881 }
Juliet Kimb27507b2019-09-20 16:11:22 -04001882
1883 /* If the adapter was in PROBE state prior to the reset,
1884 * exit here.
1885 */
1886 if (reset_state == VNIC_PROBED)
Dany Madden0cb4bc62020-11-25 18:04:27 -06001887 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04001888
1889 rc = ibmvnic_login(netdev);
1890 if (rc) {
Dany Madden0cb4bc62020-11-25 18:04:27 -06001891 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04001892 }
1893
1894 rc = init_resources(adapter);
1895 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06001896 goto out;
Juliet Kimb27507b2019-09-20 16:11:22 -04001897
1898 ibmvnic_disable_irqs(adapter);
1899
1900 adapter->state = VNIC_CLOSED;
1901
1902 if (reset_state == VNIC_CLOSED)
1903 return 0;
1904
1905 rc = __ibmvnic_open(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06001906 if (rc) {
1907 rc = IBMVNIC_OPEN_FAILED;
1908 goto out;
1909 }
Juliet Kimb27507b2019-09-20 16:11:22 -04001910
1911 /* refresh device's multicast list */
1912 ibmvnic_set_multi(netdev);
1913
1914 /* kick napi */
1915 for (i = 0; i < adapter->req_rx_queues; i++)
1916 napi_schedule(&adapter->napi[i]);
1917
Dany Madden0cb4bc62020-11-25 18:04:27 -06001918out:
1919 if (rc)
1920 adapter->state = reset_state;
1921 return rc;
Juliet Kimb27507b2019-09-20 16:11:22 -04001922}
1923
 1924/*
Nathan Fontenoted651a12017-05-03 14:04:38 -04001925 * do_reset returns zero if we are able to keep processing reset events, or
1926 * non-zero if we hit a fatal error and must halt.
1927 */
1928static int do_reset(struct ibmvnic_adapter *adapter,
1929 struct ibmvnic_rwi *rwi, u32 reset_state)
1930{
John Allen896d8692018-01-18 16:26:31 -06001931 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06001932 u64 old_num_rx_slots, old_num_tx_slots;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001933 struct net_device *netdev = adapter->netdev;
1934 int i, rc;
1935
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001936 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1937 rwi->reset_reason);
1938
Juliet Kimb27507b2019-09-20 16:11:22 -04001939 rtnl_lock();
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07001940 /*
1941 * Now that we have the rtnl lock, clear any pending failover.
1942 * This will ensure ibmvnic_open() has either completed or will
1943 * block until failover is complete.
1944 */
1945 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
1946 adapter->failover_pending = false;
Juliet Kimb27507b2019-09-20 16:11:22 -04001947
Nathan Fontenoted651a12017-05-03 14:04:38 -04001948 netif_carrier_off(netdev);
1949 adapter->reset_reason = rwi->reset_reason;
1950
John Allen896d8692018-01-18 16:26:31 -06001951 old_num_rx_queues = adapter->req_rx_queues;
1952 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06001953 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1954 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06001955
Nathan Fontenot30f79622018-04-06 18:37:06 -05001956 ibmvnic_cleanup(netdev);
1957
Thomas Falcon1f946082019-06-07 16:03:53 -05001958 if (reset_state == VNIC_OPEN &&
1959 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05001960 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Juliet Kimb27507b2019-09-20 16:11:22 -04001961 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001962
Juliet Kimb27507b2019-09-20 16:11:22 -04001963 /* Release the RTNL lock before link state change and
1964 * re-acquire after the link state change to allow
1965 * linkwatch_event to grab the RTNL lock and run during
1966 * a reset.
1967 */
1968 rtnl_unlock();
1969 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1970 rtnl_lock();
1971 if (rc)
1972 goto out;
1973
1974 if (adapter->state != VNIC_CLOSING) {
1975 rc = -1;
1976 goto out;
1977 }
1978
1979 adapter->state = VNIC_CLOSED;
John Allenc26eba02017-10-26 16:23:25 -05001980 }
1981
John Allen8cb31cf2017-05-26 10:30:37 -04001982 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1983 /* remove the closed state so when we call open it appears
1984 * we are coming from the probed state.
1985 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001986 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04001987
Juliet Kimb27507b2019-09-20 16:11:22 -04001988 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05001989 rc = ibmvnic_reenable_crq_queue(adapter);
1990 release_sub_crqs(adapter, 1);
1991 } else {
1992 rc = ibmvnic_reset_crq(adapter);
Dany Madden8b40eb732020-06-18 15:24:13 -04001993 if (rc == H_CLOSED || rc == H_SUCCESS) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05001994 rc = vio_enable_interrupts(adapter->vdev);
Dany Madden8b40eb732020-06-18 15:24:13 -04001995 if (rc)
1996 netdev_err(adapter->netdev,
1997 "Reset failed to enable interrupts. rc=%d\n",
1998 rc);
1999 }
Nathan Fontenot30f79622018-04-06 18:37:06 -05002000 }
2001
2002 if (rc) {
2003 netdev_err(adapter->netdev,
Dany Madden8b40eb732020-06-18 15:24:13 -04002004 "Reset couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002005 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05002006 }
2007
Lijun Pan635e4422020-08-19 17:52:26 -05002008 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04002009 if (rc) {
2010 rc = IBMVNIC_INIT_FAILED;
2011 goto out;
2012 }
John Allen8cb31cf2017-05-26 10:30:37 -04002013
2014 /* If the adapter was in PROBE state prior to the reset,
2015 * exit here.
2016 */
Juliet Kimb27507b2019-09-20 16:11:22 -04002017 if (reset_state == VNIC_PROBED) {
2018 rc = 0;
2019 goto out;
2020 }
John Allen8cb31cf2017-05-26 10:30:37 -04002021
2022 rc = ibmvnic_login(netdev);
2023 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002024 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04002025 }
2026
Juliet Kimb27507b2019-09-20 16:11:22 -04002027 if (adapter->req_rx_queues != old_num_rx_queues ||
2028 adapter->req_tx_queues != old_num_tx_queues ||
2029 adapter->req_rx_add_entries_per_subcrq !=
2030 old_num_rx_slots ||
2031 adapter->req_tx_entries_per_subcrq !=
Mingming Cao9f134572020-08-25 13:26:41 -04002032 old_num_tx_slots ||
2033 !adapter->rx_pool ||
2034 !adapter->tso_pool ||
2035 !adapter->tx_pool) {
John Allen896d8692018-01-18 16:26:31 -06002036 release_rx_pools(adapter);
2037 release_tx_pools(adapter);
Juliet Kima5681e22018-11-19 15:59:22 -06002038 release_napi(adapter);
2039 release_vpd_data(adapter);
2040
2041 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002042 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002043 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002044
John Allenc26eba02017-10-26 16:23:25 -05002045 } else {
2046 rc = reset_tx_pools(adapter);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002047 if (rc) {
Mingming Cao9f134572020-08-25 13:26:41 -04002048 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2049 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002050 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002051 }
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002052
John Allenc26eba02017-10-26 16:23:25 -05002053 rc = reset_rx_pools(adapter);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002054 if (rc) {
Mingming Cao9f134572020-08-25 13:26:41 -04002055 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2056 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002057 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002058 }
John Allenc26eba02017-10-26 16:23:25 -05002059 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002060 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002061 }
John Allene676d812018-03-14 10:41:29 -05002062 adapter->state = VNIC_CLOSED;
2063
Juliet Kimb27507b2019-09-20 16:11:22 -04002064 if (reset_state == VNIC_CLOSED) {
2065 rc = 0;
2066 goto out;
2067 }
John Allene676d812018-03-14 10:41:29 -05002068
Nathan Fontenoted651a12017-05-03 14:04:38 -04002069 rc = __ibmvnic_open(netdev);
2070 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002071 rc = IBMVNIC_OPEN_FAILED;
2072 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002073 }
2074
Thomas Falconbe32a242019-06-07 16:03:54 -05002075 /* refresh device's multicast list */
2076 ibmvnic_set_multi(netdev);
2077
Nathan Fontenoted651a12017-05-03 14:04:38 -04002078 /* kick napi */
2079 for (i = 0; i < adapter->req_rx_queues; i++)
2080 napi_schedule(&adapter->napi[i]);
2081
Lijun Pan98025bc2020-11-20 16:40:12 -06002082 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2083 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Thomas Falcon986103e2018-11-30 10:59:08 -06002084 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
Lijun Pan83935972020-11-20 16:40:11 -06002085 call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
2086 }
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002087
Juliet Kimb27507b2019-09-20 16:11:22 -04002088 rc = 0;
2089
2090out:
Dany Madden0cb4bc62020-11-25 18:04:27 -06002091 /* restore the adapter state if reset failed */
2092 if (rc)
2093 adapter->state = reset_state;
Juliet Kimb27507b2019-09-20 16:11:22 -04002094 rtnl_unlock();
2095
2096 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002097}
2098
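/* Heaviest recovery path: tear everything down, including the CRQ
 * itself, and rebuild from the probed state. Used after transport
 * events, when the existing queues can no longer be trusted.
 */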
Thomas Falcon2770a792018-05-23 13:38:02 -05002099static int do_hard_reset(struct ibmvnic_adapter *adapter,
2100 struct ibmvnic_rwi *rwi, u32 reset_state)
2101{
2102 struct net_device *netdev = adapter->netdev;
2103 int rc;
2104
2105 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2106 rwi->reset_reason);
2107
2108 netif_carrier_off(netdev);
2109 adapter->reset_reason = rwi->reset_reason;
2110
2111 ibmvnic_cleanup(netdev);
2112 release_resources(adapter);
2113 release_sub_crqs(adapter, 0);
2114 release_crq_queue(adapter);
2115
2116 /* remove the closed state so when we call open it appears
2117 * we are coming from the probed state.
2118 */
2119 adapter->state = VNIC_PROBED;
2120
Thomas Falconbbd669a2019-04-04 18:58:26 -05002121 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002122 rc = init_crq_queue(adapter);
2123 if (rc) {
2124 netdev_err(adapter->netdev,
2125 "Couldn't initialize crq. rc=%d\n", rc);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002126 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002127 }
2128
Lijun Pan635e4422020-08-19 17:52:26 -05002129 rc = ibmvnic_reset_init(adapter, false);
Thomas Falcon2770a792018-05-23 13:38:02 -05002130 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002131 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002132
2133 /* If the adapter was in PROBE state prior to the reset,
2134 * exit here.
2135 */
2136 if (reset_state == VNIC_PROBED)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002137 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002138
2139 rc = ibmvnic_login(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002140 if (rc)
2141 goto out;
Juliet Kima5681e22018-11-19 15:59:22 -06002142
2143 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002144 if (rc)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002145 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002146
2147 ibmvnic_disable_irqs(adapter);
2148 adapter->state = VNIC_CLOSED;
2149
2150 if (reset_state == VNIC_CLOSED)
Dany Madden0cb4bc62020-11-25 18:04:27 -06002151 goto out;
Thomas Falcon2770a792018-05-23 13:38:02 -05002152
2153 rc = __ibmvnic_open(netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002154 if (rc) {
2155 rc = IBMVNIC_OPEN_FAILED;
2156 goto out;
2157 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002158
Lijun Pan98025bc2020-11-20 16:40:12 -06002159 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
2160 call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002161out:
2162 /* restore adapter state if reset failed */
2163 if (rc)
2164 adapter->state = reset_state;
2165 return rc;
Thomas Falcon2770a792018-05-23 13:38:02 -05002166}
2167
Nathan Fontenoted651a12017-05-03 14:04:38 -04002168static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2169{
2170 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002171 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002172
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002173 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002174
2175 if (!list_empty(&adapter->rwi_list)) {
2176 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2177 list);
2178 list_del(&rwi->list);
2179 } else {
2180 rwi = NULL;
2181 }
2182
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002183 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002184 return rwi;
2185}
2186
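/* Reset worker: drain the reset work item (rwi) list, dispatching
 * each entry to do_change_param_reset(), do_hard_reset() or
 * do_reset(). If another instance already holds the resetting bit,
 * re-queue ourselves via the delayed work and bail.
 */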
Nathan Fontenoted651a12017-05-03 14:04:38 -04002187static void __ibmvnic_reset(struct work_struct *work)
2188{
2189 struct ibmvnic_rwi *rwi;
2190 struct ibmvnic_adapter *adapter;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002191 bool saved_state = false;
2192 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002193 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05002194 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002195
2196 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002197
Juliet Kim7ed5b312019-09-20 16:11:23 -04002198 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2199 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2200 IBMVNIC_RESET_DELAY);
2201 return;
2202 }
2203
Nathan Fontenoted651a12017-05-03 14:04:38 -04002204 rwi = get_next_rwi(adapter);
2205 while (rwi) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002206 spin_lock_irqsave(&adapter->state_lock, flags);
2207
Thomas Falcon36f10312019-08-27 11:10:04 -05002208 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002209 adapter->state == VNIC_REMOVED) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002210 spin_unlock_irqrestore(&adapter->state_lock, flags);
Juliet Kim1c2977c2019-09-05 17:30:01 -04002211 kfree(rwi);
2212 rc = EBUSY;
2213 break;
2214 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002215
Juliet Kim7d7195a2020-03-10 09:23:58 -05002216 if (!saved_state) {
2217 reset_state = adapter->state;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002218 saved_state = true;
2219 }
2220 spin_unlock_irqrestore(&adapter->state_lock, flags);
2221
Juliet Kimb27507b2019-09-20 16:11:22 -04002222 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2223 /* CHANGE_PARAM requestor holds rtnl_lock */
2224 rc = do_change_param_reset(adapter, rwi, reset_state);
2225 } else if (adapter->force_reset_recovery) {
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002226 /*
2227 * Since we are doing a hard reset now, clear the
2228 * failover_pending flag so we don't ignore any
2229 * future MOBILITY or other resets.
2230 */
2231 adapter->failover_pending = false;
2232
Juliet Kimb27507b2019-09-20 16:11:22 -04002233 /* Transport event occurred during previous reset */
2234 if (adapter->wait_for_reset) {
 2235	 /* Previous was CHANGE_PARAM; caller holds rtnl_lock */
2236 adapter->force_reset_recovery = false;
2237 rc = do_hard_reset(adapter, rwi, reset_state);
2238 } else {
2239 rtnl_lock();
2240 adapter->force_reset_recovery = false;
2241 rc = do_hard_reset(adapter, rwi, reset_state);
2242 rtnl_unlock();
2243 }
Sukadev Bhattiproluf15fde92020-11-25 18:04:28 -06002244 if (rc) {
2245 /* give backing device time to settle down */
2246 netdev_dbg(adapter->netdev,
2247 "[S:%d] Hard reset failed, waiting 60 secs\n",
2248 adapter->state);
2249 set_current_state(TASK_UNINTERRUPTIBLE);
2250 schedule_timeout(60 * HZ);
2251 }
Juliet Kimf9c6cea2020-04-30 13:22:11 -05002252 } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2253 adapter->from_passive_init)) {
Thomas Falcon2770a792018-05-23 13:38:02 -05002254 rc = do_reset(adapter, rwi, reset_state);
2255 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002256 kfree(rwi);
Dany Madden0cb4bc62020-11-25 18:04:27 -06002257
Dany Madden18f141b2020-11-25 18:04:25 -06002258 if (rc)
2259 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002260
2261 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002262
2263 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2264 rwi->reset_reason == VNIC_RESET_MOBILITY))
2265 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002266 }
2267
John Allenc26eba02017-10-26 16:23:25 -05002268 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002269 adapter->reset_done_rc = rc;
2270 complete(&adapter->reset_done);
2271 }
2272
Juliet Kim7ed5b312019-09-20 16:11:23 -04002273 clear_bit_unlock(0, &adapter->resetting);
2274}
2275
2276static void __ibmvnic_delayed_reset(struct work_struct *work)
2277{
2278 struct ibmvnic_adapter *adapter;
2279
2280 adapter = container_of(work, struct ibmvnic_adapter,
2281 ibmvnic_delayed_reset.work);
2282 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002283}
2284
Thomas Falconaf894d22018-04-06 18:37:04 -05002285static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2286 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002287{
Thomas Falcon2770a792018-05-23 13:38:02 -05002288 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002289 struct ibmvnic_rwi *rwi, *tmp;
2290 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002291 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002292 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002293
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002294 /*
 2295	 * If failover is pending, don't schedule any other reset.
 2296	 * Instead let the failover complete. If there is already
 2297	 * a failover reset scheduled, we will detect and drop the
2298 * duplicate reset when walking the ->rwi_list below.
2299 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002300 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002301 adapter->state == VNIC_REMOVED ||
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002302 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002303 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002304 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002305 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002306 }
2307
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002308 if (adapter->state == VNIC_PROBING) {
2309 netdev_warn(netdev, "Adapter reset during probe\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002310 ret = adapter->init_done_rc = EAGAIN;
2311 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002312 }
2313
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002314 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002315
2316 list_for_each(entry, &adapter->rwi_list) {
2317 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2318 if (tmp->reset_reason == reason) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002319 netdev_dbg(netdev, "Skipping matching reset\n");
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002320 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Thomas Falconaf894d22018-04-06 18:37:04 -05002321 ret = EBUSY;
2322 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002323 }
2324 }
2325
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002326 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002327 if (!rwi) {
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002328 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002329 ibmvnic_close(netdev);
Thomas Falconaf894d22018-04-06 18:37:04 -05002330 ret = ENOMEM;
2331 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002332 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002333 /* if we just received a transport event,
2334 * flush reset queue and process this reset
2335 */
2336 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2337 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2338 list_del(entry);
2339 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002340 rwi->reset_reason = reason;
2341 list_add_tail(&rwi->list, &adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002342 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002343 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002344 schedule_work(&adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002345
2346 return 0;
2347err:
Thomas Falconaf894d22018-04-06 18:37:04 -05002348 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002349}
2350
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05002351static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002352{
2353 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002354
Lijun Pan855a6312020-11-20 16:40:13 -06002355 if (test_bit(0, &adapter->resetting)) {
2356 netdev_err(adapter->netdev,
2357 "Adapter is resetting, skip timeout reset\n");
2358 return;
2359 }
2360
Nathan Fontenoted651a12017-05-03 14:04:38 -04002361 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002362}
2363
2364static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2365 struct ibmvnic_rx_buff *rx_buff)
2366{
2367 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2368
2369 rx_buff->skb = NULL;
2370
2371 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2372 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2373
2374 atomic_dec(&pool->available);
2375}
2376
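/* NAPI poll: pull completed rx descriptors off the sub-CRQ, pass the
 * skbs up with napi_gro_receive(), replenish the rx pool, and
 * re-enable the queue interrupt once fewer than budget frames were
 * processed.
 */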
2377static int ibmvnic_poll(struct napi_struct *napi, int budget)
2378{
2379 struct net_device *netdev = napi->dev;
2380 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2381 int scrq_num = (int)(napi - adapter->napi);
2382 int frames_processed = 0;
Nathan Fontenot152ce472017-05-26 10:30:54 -04002383
Thomas Falcon032c5e82015-12-21 11:26:06 -06002384restart_poll:
2385 while (frames_processed < budget) {
2386 struct sk_buff *skb;
2387 struct ibmvnic_rx_buff *rx_buff;
2388 union sub_crq *next;
2389 u32 length;
2390 u16 offset;
2391 u8 flags = 0;
2392
Juliet Kim7ed5b312019-09-20 16:11:23 -04002393 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002394 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Thomas Falcon21ecba62017-06-14 23:50:09 -05002395 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2396 napi_complete_done(napi, frames_processed);
2397 return frames_processed;
2398 }
2399
Thomas Falcon032c5e82015-12-21 11:26:06 -06002400 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2401 break;
2402 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2403 rx_buff =
2404 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2405 rx_comp.correlator);
2406 /* do error checking */
2407 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002408 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2409 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002410 /* free the entry */
2411 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002412 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002413 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002414 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002415 } else if (!rx_buff->skb) {
2416 /* free the entry */
2417 next->rx_comp.first = 0;
2418 remove_buff_from_pool(adapter, rx_buff);
2419 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002420 }
2421
2422 length = be32_to_cpu(next->rx_comp.len);
2423 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2424 flags = next->rx_comp.flags;
2425 skb = rx_buff->skb;
2426 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2427 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002428
 2429	 /* VLAN header has been stripped by the system firmware and
2430 * needs to be inserted by the driver
2431 */
2432 if (adapter->rx_vlan_header_insertion &&
2433 (flags & IBMVNIC_VLAN_STRIPPED))
2434 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2435 ntohs(next->rx_comp.vlan_tci));
2436
Thomas Falcon032c5e82015-12-21 11:26:06 -06002437 /* free the entry */
2438 next->rx_comp.first = 0;
2439 remove_buff_from_pool(adapter, rx_buff);
2440
2441 skb_put(skb, length);
2442 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002443 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002444
2445 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2446 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2447 skb->ip_summed = CHECKSUM_UNNECESSARY;
2448 }
2449
2450 length = skb->len;
2451 napi_gro_receive(napi, skb); /* send it up */
2452 netdev->stats.rx_packets++;
2453 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002454 adapter->rx_stats_buffers[scrq_num].packets++;
2455 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002456 frames_processed++;
2457 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002458
2459 if (adapter->state != VNIC_CLOSING)
2460 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002461
2462 if (frames_processed < budget) {
2463 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08002464 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002465 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2466 napi_reschedule(napi)) {
2467 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2468 goto restart_poll;
2469 }
2470 }
2471 return frames_processed;
2472}
2473
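/* Schedule a CHANGE_PARAM reset and block until it completes. The
 * current mtu/queue/ring settings are saved up front so they can be
 * restored with a second reset if the requested values fail.
 */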
John Allenc26eba02017-10-26 16:23:25 -05002474static int wait_for_reset(struct ibmvnic_adapter *adapter)
2475{
Thomas Falconaf894d22018-04-06 18:37:04 -05002476 int rc, ret;
2477
John Allenc26eba02017-10-26 16:23:25 -05002478 adapter->fallback.mtu = adapter->req_mtu;
2479 adapter->fallback.rx_queues = adapter->req_rx_queues;
2480 adapter->fallback.tx_queues = adapter->req_tx_queues;
2481 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2482 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2483
Thomas Falcon070eca92019-11-25 17:12:53 -06002484 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002485 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002486 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002487
2488 if (rc) {
2489 ret = rc;
2490 goto out;
2491 }
2492 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2493 if (rc) {
2494 ret = -ENODEV;
2495 goto out;
2496 }
John Allenc26eba02017-10-26 16:23:25 -05002497
Thomas Falconaf894d22018-04-06 18:37:04 -05002498 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002499 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002500 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002501 adapter->desired.mtu = adapter->fallback.mtu;
2502 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2503 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2504 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2505 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2506
Thomas Falcon070eca92019-11-25 17:12:53 -06002507 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002508 adapter->wait_for_reset = true;
2509 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002510 if (rc) {
2511 ret = rc;
2512 goto out;
2513 }
2514 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2515 60000);
2516 if (rc) {
2517 ret = -ENODEV;
2518 goto out;
2519 }
John Allenc26eba02017-10-26 16:23:25 -05002520 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002521out:
John Allenc26eba02017-10-26 16:23:25 -05002522 adapter->wait_for_reset = false;
2523
Thomas Falconaf894d22018-04-06 18:37:04 -05002524 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002525}
2526
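/* ndo_change_mtu hook; the new MTU takes effect through a full
 * CHANGE_PARAM reset. Illustrative usage, with a hypothetical
 * interface name:
 *   ip link set dev net0 mtu 9000
 */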
John Allen3a807b72017-06-06 16:55:52 -05002527static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2528{
John Allenc26eba02017-10-26 16:23:25 -05002529 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2530
2531 adapter->desired.mtu = new_mtu + ETH_HLEN;
2532
2533 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05002534}
2535
Thomas Falconf10b09e2018-03-12 11:51:05 -05002536static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2537 struct net_device *dev,
2538 netdev_features_t features)
2539{
 2540	 /* Some backing hardware adapters cannot
 2541	 * handle packets with an MSS less than 224
2542 * or with only one segment.
2543 */
2544 if (skb_is_gso(skb)) {
2545 if (skb_shinfo(skb)->gso_size < 224 ||
2546 skb_shinfo(skb)->gso_segs == 1)
2547 features &= ~NETIF_F_GSO_MASK;
2548 }
2549
2550 return features;
2551}
2552
Thomas Falcon032c5e82015-12-21 11:26:06 -06002553static const struct net_device_ops ibmvnic_netdev_ops = {
2554 .ndo_open = ibmvnic_open,
2555 .ndo_stop = ibmvnic_close,
2556 .ndo_start_xmit = ibmvnic_xmit,
2557 .ndo_set_rx_mode = ibmvnic_set_multi,
2558 .ndo_set_mac_address = ibmvnic_set_mac,
2559 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002560 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05002561 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05002562 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002563};
2564
2565/* ethtool functions */
2566
Philippe Reynes8a433792017-01-07 22:37:29 +01002567static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2568 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002569{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002570 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2571 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01002572
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002573 rc = send_query_phys_parms(adapter);
2574 if (rc) {
2575 adapter->speed = SPEED_UNKNOWN;
2576 adapter->duplex = DUPLEX_UNKNOWN;
2577 }
2578 cmd->base.speed = adapter->speed;
2579 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01002580 cmd->base.port = PORT_FIBRE;
2581 cmd->base.phy_address = 0;
2582 cmd->base.autoneg = AUTONEG_ENABLE;
2583
Thomas Falcon032c5e82015-12-21 11:26:06 -06002584 return 0;
2585}
2586
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002587static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002588 struct ethtool_drvinfo *info)
2589{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002590 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2591
Thomas Falcon032c5e82015-12-21 11:26:06 -06002592 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2593 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002594 strlcpy(info->fw_version, adapter->fw_version,
2595 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002596}
2597
2598static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2599{
2600 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2601
2602 return adapter->msg_enable;
2603}
2604
2605static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2606{
2607 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2608
2609 adapter->msg_enable = data;
2610}
2611
2612static u32 ibmvnic_get_link(struct net_device *netdev)
2613{
2614 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2615
2616 /* Don't need to send a query because we request a logical link up at
2617 * init and then we wait for link state indications
2618 */
2619 return adapter->logical_link_state;
2620}
2621
2622static void ibmvnic_get_ringparam(struct net_device *netdev,
2623 struct ethtool_ringparam *ring)
2624{
John Allenbc131b32017-08-02 16:46:30 -05002625 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2626
Thomas Falcon723ad912018-09-28 18:38:26 -05002627 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2628 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2629 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2630 } else {
2631 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2632 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2633 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002634 ring->rx_mini_max_pending = 0;
2635 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05002636 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2637 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002638 ring->rx_mini_pending = 0;
2639 ring->rx_jumbo_pending = 0;
2640}
2641
John Allenc26eba02017-10-26 16:23:25 -05002642static int ibmvnic_set_ringparam(struct net_device *netdev,
2643 struct ethtool_ringparam *ring)
2644{
2645 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002646 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002647
John Allenc26eba02017-10-26 16:23:25 -05002649 adapter->desired.rx_entries = ring->rx_pending;
2650 adapter->desired.tx_entries = ring->tx_pending;
2651
Thomas Falcon723ad912018-09-28 18:38:26 -05002652 ret = wait_for_reset(adapter);
2653
2654 if (!ret &&
2655 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2656 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2657 netdev_info(netdev,
2658 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2659 ring->rx_pending, ring->tx_pending,
2660 adapter->req_rx_add_entries_per_subcrq,
2661 adapter->req_tx_entries_per_subcrq);
2662 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002663}
2664
John Allenc2dbeb62017-08-02 16:47:17 -05002665static void ibmvnic_get_channels(struct net_device *netdev,
2666 struct ethtool_channels *channels)
2667{
2668 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2669
Thomas Falcon723ad912018-09-28 18:38:26 -05002670 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2671 channels->max_rx = adapter->max_rx_queues;
2672 channels->max_tx = adapter->max_tx_queues;
2673 } else {
2674 channels->max_rx = IBMVNIC_MAX_QUEUES;
2675 channels->max_tx = IBMVNIC_MAX_QUEUES;
2676 }
2677
John Allenc2dbeb62017-08-02 16:47:17 -05002678 channels->max_other = 0;
2679 channels->max_combined = 0;
2680 channels->rx_count = adapter->req_rx_queues;
2681 channels->tx_count = adapter->req_tx_queues;
2682 channels->other_count = 0;
2683 channels->combined_count = 0;
2684}
2685
John Allenc26eba02017-10-26 16:23:25 -05002686static int ibmvnic_set_channels(struct net_device *netdev,
2687 struct ethtool_channels *channels)
2688{
2689 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002690 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002691
John Allenc26eba02017-10-26 16:23:25 -05002693 adapter->desired.rx_queues = channels->rx_count;
2694 adapter->desired.tx_queues = channels->tx_count;
2695
Thomas Falcon723ad912018-09-28 18:38:26 -05002696 ret = wait_for_reset(adapter);
2697
2698 if (!ret &&
2699 (adapter->req_rx_queues != channels->rx_count ||
2700 adapter->req_tx_queues != channels->tx_count))
2701 netdev_info(netdev,
2702 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2703 channels->rx_count, channels->tx_count,
2704 adapter->req_rx_queues, adapter->req_tx_queues);
2705 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002707}
2708
Thomas Falcon032c5e82015-12-21 11:26:06 -06002709static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2710{
John Allen3d52b592017-08-02 16:44:14 -05002711 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002712 int i;
2713
Thomas Falcon723ad912018-09-28 18:38:26 -05002714 switch (stringset) {
2715 case ETH_SS_STATS:
2716 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2717 i++, data += ETH_GSTRING_LEN)
2718 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2719
2720 for (i = 0; i < adapter->req_tx_queues; i++) {
2721 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2722 data += ETH_GSTRING_LEN;
2723
2724 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2725 data += ETH_GSTRING_LEN;
2726
2727 snprintf(data, ETH_GSTRING_LEN,
2728 "tx%d_dropped_packets", i);
2729 data += ETH_GSTRING_LEN;
2730 }
2731
2732 for (i = 0; i < adapter->req_rx_queues; i++) {
2733 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2734 data += ETH_GSTRING_LEN;
2735
2736 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2737 data += ETH_GSTRING_LEN;
2738
2739 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2740 data += ETH_GSTRING_LEN;
2741 }
2742 break;
2743
2744 case ETH_SS_PRIV_FLAGS:
2745 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2746 strcpy(data + i * ETH_GSTRING_LEN,
2747 ibmvnic_priv_flags[i]);
2748 break;
2749 default:
Thomas Falcon032c5e82015-12-21 11:26:06 -06002750 return;
John Allen3d52b592017-08-02 16:44:14 -05002751 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002752}
2753
2754static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2755{
John Allen3d52b592017-08-02 16:44:14 -05002756 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2757
Thomas Falcon032c5e82015-12-21 11:26:06 -06002758 switch (sset) {
2759 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05002760 return ARRAY_SIZE(ibmvnic_stats) +
2761 adapter->req_tx_queues * NUM_TX_STATS +
2762 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon723ad912018-09-28 18:38:26 -05002763 case ETH_SS_PRIV_FLAGS:
2764 return ARRAY_SIZE(ibmvnic_priv_flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002765 default:
2766 return -EOPNOTSUPP;
2767 }
2768}
2769
2770static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2771 struct ethtool_stats *stats, u64 *data)
2772{
2773 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2774 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05002775 int i, j;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002776 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002777
2778 memset(&crq, 0, sizeof(crq));
2779 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2780 crq.request_statistics.cmd = REQUEST_STATISTICS;
2781 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2782 crq.request_statistics.len =
2783 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002784
2785 /* Wait for data to be written */
Thomas Falcon070eca92019-11-25 17:12:53 -06002786 reinit_completion(&adapter->stats_done);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002787 rc = ibmvnic_send_crq(adapter, &crq);
2788 if (rc)
2789 return;
Thomas Falcon476d96c2019-11-25 17:12:55 -06002790 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2791 if (rc)
2792 return;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002793
2794 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
John Allen52da5c12017-08-02 16:45:28 -05002795 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2796 ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05002797
2798 for (j = 0; j < adapter->req_tx_queues; j++) {
2799 data[i] = adapter->tx_stats_buffers[j].packets;
2800 i++;
2801 data[i] = adapter->tx_stats_buffers[j].bytes;
2802 i++;
2803 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2804 i++;
2805 }
2806
2807 for (j = 0; j < adapter->req_rx_queues; j++) {
2808 data[i] = adapter->rx_stats_buffers[j].packets;
2809 i++;
2810 data[i] = adapter->rx_stats_buffers[j].bytes;
2811 i++;
2812 data[i] = adapter->rx_stats_buffers[j].interrupts;
2813 i++;
2814 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002815}
2816
Thomas Falcon723ad912018-09-28 18:38:26 -05002817static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2818{
2819 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2820
2821 return adapter->priv_flags;
2822}
2823
2824static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2825{
2826 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2827 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2828
2829 if (which_maxes)
2830 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2831 else
2832 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2833
2834 return 0;
2835}

Thomas Falcon032c5e82015-12-21 11:26:06 -06002836static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002837 .get_drvinfo = ibmvnic_get_drvinfo,
2838 .get_msglevel = ibmvnic_get_msglevel,
2839 .set_msglevel = ibmvnic_set_msglevel,
2840 .get_link = ibmvnic_get_link,
2841 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05002842 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05002843 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05002844 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002845 .get_strings = ibmvnic_get_strings,
2846 .get_sset_count = ibmvnic_get_sset_count,
2847 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01002848 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon723ad912018-09-28 18:38:26 -05002849 .get_priv_flags = ibmvnic_get_priv_flags,
2850 .set_priv_flags = ibmvnic_set_priv_flags,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002851};
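
/*
 * Rough map from userspace to the ops above; the interface name "eth0"
 * is only illustrative:
 *
 *   ethtool -i eth0                  -> ibmvnic_get_drvinfo
 *   ethtool -g eth0                  -> ibmvnic_get_ringparam
 *   ethtool -G eth0 rx 512 tx 512    -> ibmvnic_set_ringparam
 *   ethtool -l eth0                  -> ibmvnic_get_channels
 *   ethtool -L eth0 rx 4 tx 4        -> ibmvnic_set_channels
 *   ethtool -S eth0                  -> ibmvnic_get_ethtool_stats
 *   ethtool --show-priv-flags eth0   -> ibmvnic_get_priv_flags
 *
 * Note that the set_ringparam/set_channels paths only record the
 * request in adapter->desired and then call wait_for_reset(), so the
 * final values may be capped by the VNIC server (see the netdev_info
 * messages above).
 */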
2852
2853/* Routines for managing CRQs/sCRQs */
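/*
 * Lifecycle sketch of the sub-CRQs handled below: init_sub_crq_queue()
 * allocates an order-2 (4 * PAGE_SIZE) message area, DMA-maps it and
 * registers it with the hypervisor through h_reg_sub_crq();
 * reset_one_sub_crq_queue() releases the irq, zeroes the messages and
 * re-registers the same buffer; release_sub_crq_queue() issues
 * H_FREE_SUB_CRQ and frees the pages.
 */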
2854
Nathan Fontenot57a49432017-05-26 10:31:12 -04002855static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2856 struct ibmvnic_sub_crq_queue *scrq)
2857{
2858 int rc;
2859
Dany Madden9281cf22020-11-25 18:04:26 -06002860 if (!scrq) {
2861 /* scrq is NULL here, so do not dereference it in the message */
2862 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2863 return -EINVAL;
2864 }
2866
Nathan Fontenot57a49432017-05-26 10:31:12 -04002867 if (scrq->irq) {
2868 free_irq(scrq->irq, scrq);
2869 irq_dispose_mapping(scrq->irq);
2870 scrq->irq = 0;
2871 }
Dany Madden9281cf22020-11-25 18:04:26 -06002872 if (scrq->msgs) {
2873 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2874 atomic_set(&scrq->used, 0);
2875 scrq->cur = 0;
2876 } else {
2877 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2878 return -EINVAL;
2879 }
Nathan Fontenot57a49432017-05-26 10:31:12 -04002880
2881 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2882 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2883 return rc;
2884}
2885
2886static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2887{
2888 int i, rc;
2889
Lijun Pana0faaa22020-11-23 13:35:45 -06002890 if (!adapter->tx_scrq || !adapter->rx_scrq)
2891 return -EINVAL;
2892
Nathan Fontenot57a49432017-05-26 10:31:12 -04002893 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002894 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002895 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2896 if (rc)
2897 return rc;
2898 }
2899
2900 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002901 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002902 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2903 if (rc)
2904 return rc;
2905 }
2906
Nathan Fontenot57a49432017-05-26 10:31:12 -04002907 return 0;
2908}
2909
Thomas Falcon032c5e82015-12-21 11:26:06 -06002910static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002911 struct ibmvnic_sub_crq_queue *scrq,
2912 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002913{
2914 struct device *dev = &adapter->vdev->dev;
2915 long rc;
2916
2917 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2918
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002919 if (do_h_free) {
2920 /* Close the sub-crqs */
2921 do {
2922 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2923 adapter->vdev->unit_address,
2924 scrq->crq_num);
2925 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002926
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002927 if (rc) {
2928 netdev_err(adapter->netdev,
2929 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2930 scrq->crq_num, rc);
2931 }
Thomas Falconffa73852017-04-19 13:44:29 -04002932 }
2933
Thomas Falcon032c5e82015-12-21 11:26:06 -06002934 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2935 DMA_BIDIRECTIONAL);
2936 free_pages((unsigned long)scrq->msgs, 2);
2937 kfree(scrq);
2938}
2939
2940static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2941 *adapter)
2942{
2943 struct device *dev = &adapter->vdev->dev;
2944 struct ibmvnic_sub_crq_queue *scrq;
2945 int rc;
2946
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002947 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002948 if (!scrq)
2949 return NULL;
2950
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04002951 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002952 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002953 if (!scrq->msgs) {
2954 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2955 goto zero_page_failed;
2956 }
2957
2958 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2959 DMA_BIDIRECTIONAL);
2960 if (dma_mapping_error(dev, scrq->msg_token)) {
2961 dev_warn(dev, "Couldn't map crq queue messages page\n");
2962 goto map_failed;
2963 }
2964
2965 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2966 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2967
2968 if (rc == H_RESOURCE)
2969 rc = ibmvnic_reset_crq(adapter);
2970
2971 if (rc == H_CLOSED) {
2972 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2973 } else if (rc) {
2974 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2975 goto reg_failed;
2976 }
2977
Thomas Falcon032c5e82015-12-21 11:26:06 -06002978 scrq->adapter = adapter;
2979 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002980 spin_lock_init(&scrq->lock);
2981
2982 netdev_dbg(adapter->netdev,
2983 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2984 scrq->crq_num, scrq->hw_irq, scrq->irq);
2985
2986 return scrq;
2987
Thomas Falcon032c5e82015-12-21 11:26:06 -06002988reg_failed:
2989 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2990 DMA_BIDIRECTIONAL);
2991map_failed:
2992 free_pages((unsigned long)scrq->msgs, 2);
2993zero_page_failed:
2994 kfree(scrq);
2995
2996 return NULL;
2997}
2998
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002999static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003000{
3001 int i;
3002
3003 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003004 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003005 if (!adapter->tx_scrq[i])
3006 continue;
3007
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003008 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3009 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003010 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003011 free_irq(adapter->tx_scrq[i]->irq,
3012 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003013 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003014 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003015 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003016
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003017 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3018 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003019 }
3020
Nathan Fontenot9501df32017-03-15 23:38:07 -04003021 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003022 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003023 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003024 }
3025
3026 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003027 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003028 if (!adapter->rx_scrq[i])
3029 continue;
3030
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003031 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3032 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003033 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003034 free_irq(adapter->rx_scrq[i]->irq,
3035 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003036 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003037 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003038 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003039
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003040 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3041 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003042 }
3043
Nathan Fontenot9501df32017-03-15 23:38:07 -04003044 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003045 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003046 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003047 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003048}
3049
3050static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3051 struct ibmvnic_sub_crq_queue *scrq)
3052{
3053 struct device *dev = &adapter->vdev->dev;
3054 unsigned long rc;
3055
3056 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3057 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3058 if (rc)
3059 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3060 scrq->hw_irq, rc);
3061 return rc;
3062}
3063
3064static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3065 struct ibmvnic_sub_crq_queue *scrq)
3066{
3067 struct device *dev = &adapter->vdev->dev;
3068 unsigned long rc;
3069
3070 if (scrq->hw_irq > 0x100000000ULL) {
3071 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3072 return 1;
3073 }
3074
Juliet Kim7ed5b312019-09-20 16:11:23 -04003075 if (test_bit(0, &adapter->resetting) &&
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003076 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Juliet Kim284f87d2019-11-20 10:50:03 -05003077 u64 val = (0xff000000) | scrq->hw_irq;
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003078
Juliet Kim284f87d2019-11-20 10:50:03 -05003079 rc = plpar_hcall_norets(H_EOI, val);
Juliet Kim2df5c602019-11-20 10:50:04 -05003080 /* H_EOI would fail with rc = H_FUNCTION when running
3081 * in XIVE mode, which is expected and not an error.
3082 */
3083 if (rc && (rc != H_FUNCTION))
Juliet Kim284f87d2019-11-20 10:50:03 -05003084 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3085 val, rc);
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003086 }
Thomas Falconf23e0642018-04-15 18:53:36 -05003087
Thomas Falcon032c5e82015-12-21 11:26:06 -06003088 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3089 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3090 if (rc)
3091 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3092 scrq->hw_irq, rc);
3093 return rc;
3094}
3095
3096static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3097 struct ibmvnic_sub_crq_queue *scrq)
3098{
3099 struct device *dev = &adapter->vdev->dev;
Thomas Falcon06b3e352018-03-16 20:00:28 -05003100 struct ibmvnic_tx_pool *tx_pool;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003101 struct ibmvnic_tx_buff *txbuff;
3102 union sub_crq *next;
3103 int index;
3104 int i, j;
3105
3106restart_loop:
3107 while (pending_scrq(adapter, scrq)) {
3108 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06003109 int num_entries = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003110
3111 next = ibmvnic_next_scrq(adapter, scrq);
3112 for (i = 0; i < next->tx_comp.num_comps; i++) {
3113 if (next->tx_comp.rcs[i]) {
3114 dev_err(dev, "tx error %x\n",
3115 next->tx_comp.rcs[i]);
3116 continue;
3117 }
3118 index = be32_to_cpu(next->tx_comp.correlators[i]);
Thomas Falcon06b3e352018-03-16 20:00:28 -05003119 if (index & IBMVNIC_TSO_POOL_MASK) {
3120 tx_pool = &adapter->tso_pool[pool];
3121 index &= ~IBMVNIC_TSO_POOL_MASK;
3122 } else {
3123 tx_pool = &adapter->tx_pool[pool];
3124 }
3125
3126 txbuff = &tx_pool->tx_buff[index];
Thomas Falcon032c5e82015-12-21 11:26:06 -06003127
3128 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
3129 if (!txbuff->data_dma[j])
3130 continue;
3131
3132 txbuff->data_dma[j] = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003133 }
3134
Thomas Falcon142c0ac2017-03-05 12:18:41 -06003135 if (txbuff->last_frag) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003136 dev_kfree_skb_any(txbuff->skb);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003137 txbuff->skb = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06003138 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003139
Thomas Falconffc385b2018-02-18 10:08:41 -06003140 num_entries += txbuff->num_entries;
3141
Thomas Falcon06b3e352018-03-16 20:00:28 -05003142 tx_pool->free_map[tx_pool->producer_index] = index;
3143 tx_pool->producer_index =
3144 (tx_pool->producer_index + 1) %
3145 tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003146 }
3147 /* remove tx_comp scrq */
3148 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003149
Thomas Falconffc385b2018-02-18 10:08:41 -06003150 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003151 (adapter->req_tx_entries_per_subcrq / 2) &&
3152 __netif_subqueue_stopped(adapter->netdev,
3153 scrq->pool_index)) {
3154 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06003155 netdev_dbg(adapter->netdev, "Started queue %d\n",
3156 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003157 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003158 }
3159
3160 enable_scrq_irq(adapter, scrq);
3161
3162 if (pending_scrq(adapter, scrq)) {
3163 disable_scrq_irq(adapter, scrq);
3164 goto restart_loop;
3165 }
3166
3167 return 0;
3168}
3169
3170static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3171{
3172 struct ibmvnic_sub_crq_queue *scrq = instance;
3173 struct ibmvnic_adapter *adapter = scrq->adapter;
3174
3175 disable_scrq_irq(adapter, scrq);
3176 ibmvnic_complete_tx(adapter, scrq);
3177
3178 return IRQ_HANDLED;
3179}
3180
3181static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3182{
3183 struct ibmvnic_sub_crq_queue *scrq = instance;
3184 struct ibmvnic_adapter *adapter = scrq->adapter;
3185
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06003186 /* When booting a kdump kernel we can hit pending interrupts
3187 * prior to completing driver initialization.
3188 */
3189 if (unlikely(adapter->state != VNIC_OPEN))
3190 return IRQ_NONE;
3191
John Allen3d52b592017-08-02 16:44:14 -05003192 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3193
Thomas Falcon032c5e82015-12-21 11:26:06 -06003194 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3195 disable_scrq_irq(adapter, scrq);
3196 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3197 }
3198
3199 return IRQ_HANDLED;
3200}
3201
Thomas Falconea22d512016-07-06 15:35:17 -05003202static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3203{
3204 struct device *dev = &adapter->vdev->dev;
3205 struct ibmvnic_sub_crq_queue *scrq;
3206 int i = 0, j = 0;
3207 int rc = 0;
3208
3209 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003210 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3211 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003212 scrq = adapter->tx_scrq[i];
3213 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3214
Michael Ellerman99c17902016-09-10 19:59:05 +10003215 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003216 rc = -EINVAL;
3217 dev_err(dev, "Error mapping irq\n");
3218 goto req_tx_irq_failed;
3219 }
3220
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003221 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3222 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003223 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003224 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003225
3226 if (rc) {
3227 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3228 scrq->irq, rc);
3229 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06003230 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05003231 }
3232 }
3233
3234 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003235 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3236 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003237 scrq = adapter->rx_scrq[i];
3238 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10003239 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003240 rc = -EINVAL;
3241 dev_err(dev, "Error mapping irq\n");
3242 goto req_rx_irq_failed;
3243 }
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003244 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3245 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003246 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003247 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003248 if (rc) {
3249 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3250 scrq->irq, rc);
3251 irq_dispose_mapping(scrq->irq);
3252 goto req_rx_irq_failed;
3253 }
3254 }
3255 return rc;
3256
3257req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003258 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003259 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3260 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003261 }
Thomas Falconea22d512016-07-06 15:35:17 -05003262 i = adapter->req_tx_queues;
3263req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003264 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003265 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
Thomas Falcon27a21452020-07-29 16:36:32 -05003266 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003267 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003268 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05003269 return rc;
3270}
3271
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003272static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003273{
3274 struct device *dev = &adapter->vdev->dev;
3275 struct ibmvnic_sub_crq_queue **allqueues;
3276 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003277 int total_queues;
3278 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05003279 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003280
Thomas Falcon032c5e82015-12-21 11:26:06 -06003281 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3282
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003283 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003284 if (!allqueues)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003285 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003286
3287 for (i = 0; i < total_queues; i++) {
3288 allqueues[i] = init_sub_crq_queue(adapter);
3289 if (!allqueues[i]) {
3290 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3291 break;
3292 }
3293 registered_queues++;
3294 }
3295
3296 /* Make sure we were able to register the minimum number of queues */
3297 if (registered_queues <
3298 adapter->min_tx_queues + adapter->min_rx_queues) {
3299 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3300 goto tx_failed;
3301 }
3302
3303 /* Spread the allocation shortfall across the rx and tx queue counts */
3304 for (i = 0; i < total_queues - registered_queues + more; i++) {
3305 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3306 switch (i % 3) {
3307 case 0:
3308 if (adapter->req_rx_queues > adapter->min_rx_queues)
3309 adapter->req_rx_queues--;
3310 else
3311 more++;
3312 break;
3313 case 1:
3314 if (adapter->req_tx_queues > adapter->min_tx_queues)
3315 adapter->req_tx_queues--;
3316 else
3317 more++;
3318 break;
3319 }
3320 }
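
/*
 * Illustrative walk-through with hypothetical numbers: for
 * total_queues = 10 and registered_queues = 7 the loop above starts
 * with a shortfall of 3; i = 0 drops an rx queue, i = 1 drops a tx
 * queue and i = 2 matches no case, so roughly two out of every three
 * iterations trim a queue. Whenever a minimum prevents trimming,
 * "more" is bumped so the loop runs longer and retries on the other
 * queue type.
 */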
3321
3322 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003323 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003324 if (!adapter->tx_scrq)
3325 goto tx_failed;
3326
3327 for (i = 0; i < adapter->req_tx_queues; i++) {
3328 adapter->tx_scrq[i] = allqueues[i];
3329 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003330 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003331 }
3332
3333 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003334 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003335 if (!adapter->rx_scrq)
3336 goto rx_failed;
3337
3338 for (i = 0; i < adapter->req_rx_queues; i++) {
3339 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3340 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003341 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003342 }
3343
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003344 kfree(allqueues);
3345 return 0;
3346
3347rx_failed:
3348 kfree(adapter->tx_scrq);
3349 adapter->tx_scrq = NULL;
3350tx_failed:
3351 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003352 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003353 kfree(allqueues);
3354 return -1;
3355}
3356
Lijun Pan09081b92020-09-27 20:13:27 -05003357static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003358{
3359 struct device *dev = &adapter->vdev->dev;
3360 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05003361 int max_entries;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003362
3363 if (!retry) {
3364 /* Sub-CRQ entries are 32 bytes long */
3365 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3366
3367 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3368 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3369 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3370 return;
3371 }
3372
John Allenc26eba02017-10-26 16:23:25 -05003373 if (adapter->desired.mtu)
3374 adapter->req_mtu = adapter->desired.mtu;
3375 else
3376 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003377
John Allenc26eba02017-10-26 16:23:25 -05003378 if (!adapter->desired.tx_entries)
3379 adapter->desired.tx_entries =
3380 adapter->max_tx_entries_per_subcrq;
3381 if (!adapter->desired.rx_entries)
3382 adapter->desired.rx_entries =
3383 adapter->max_rx_add_entries_per_subcrq;
3384
3385 max_entries = IBMVNIC_MAX_LTB_SIZE /
3386 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3387
3388 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3389 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3390 adapter->desired.tx_entries = max_entries;
3391 }
3392
3393 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3394 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3395 adapter->desired.rx_entries = max_entries;
3396 }
3397
3398 if (adapter->desired.tx_entries)
3399 adapter->req_tx_entries_per_subcrq =
3400 adapter->desired.tx_entries;
3401 else
3402 adapter->req_tx_entries_per_subcrq =
3403 adapter->max_tx_entries_per_subcrq;
3404
3405 if (adapter->desired.rx_entries)
3406 adapter->req_rx_add_entries_per_subcrq =
3407 adapter->desired.rx_entries;
3408 else
3409 adapter->req_rx_add_entries_per_subcrq =
3410 adapter->max_rx_add_entries_per_subcrq;
3411
3412 if (adapter->desired.tx_queues)
3413 adapter->req_tx_queues =
3414 adapter->desired.tx_queues;
3415 else
3416 adapter->req_tx_queues =
3417 adapter->opt_tx_comp_sub_queues;
3418
3419 if (adapter->desired.rx_queues)
3420 adapter->req_rx_queues =
3421 adapter->desired.rx_queues;
3422 else
3423 adapter->req_rx_queues =
3424 adapter->opt_rx_comp_queues;
3425
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003426 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003427 }
3428
Thomas Falcon032c5e82015-12-21 11:26:06 -06003429 memset(&crq, 0, sizeof(crq));
3430 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3431 crq.request_capability.cmd = REQUEST_CAPABILITY;
3432
3433 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003434 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003435 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003436 ibmvnic_send_crq(adapter, &crq);
3437
3438 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003439 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003440 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003441 ibmvnic_send_crq(adapter, &crq);
3442
3443 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003444 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003445 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003446 ibmvnic_send_crq(adapter, &crq);
3447
3448 crq.request_capability.capability =
3449 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3450 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003451 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003452 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003453 ibmvnic_send_crq(adapter, &crq);
3454
3455 crq.request_capability.capability =
3456 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3457 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003458 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003459 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003460 ibmvnic_send_crq(adapter, &crq);
3461
3462 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06003463 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06003464 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003465 ibmvnic_send_crq(adapter, &crq);
3466
3467 if (adapter->netdev->flags & IFF_PROMISC) {
3468 if (adapter->promisc_supported) {
3469 crq.request_capability.capability =
3470 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003471 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06003472 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003473 ibmvnic_send_crq(adapter, &crq);
3474 }
3475 } else {
3476 crq.request_capability.capability =
3477 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003478 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06003479 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003480 ibmvnic_send_crq(adapter, &crq);
3481 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003482}
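
/*
 * Flow sketch for the requests above: every REQUEST_CAPABILITY CRQ
 * increments running_cap_crqs before being sent, and the VNIC server
 * answers each one individually. The response handler (outside this
 * excerpt) decrements the counter; if the server cannot honor a value
 * it proposes one instead and the capability is renegotiated. The
 * desired.* inputs are clamped above so that one long term buffer
 * never exceeds IBMVNIC_MAX_LTB_SIZE.
 */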
3483
3484static int pending_scrq(struct ibmvnic_adapter *adapter,
3485 struct ibmvnic_sub_crq_queue *scrq)
3486{
3487 union sub_crq *entry = &scrq->msgs[scrq->cur];
3488
Thomas Falcon1cf9cc72017-06-14 23:50:08 -05003489 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003490 return 1;
3491 else
3492 return 0;
3493}
3494
3495static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3496 struct ibmvnic_sub_crq_queue *scrq)
3497{
3498 union sub_crq *entry;
3499 unsigned long flags;
3500
3501 spin_lock_irqsave(&scrq->lock, flags);
3502 entry = &scrq->msgs[scrq->cur];
3503 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3504 if (++scrq->cur == scrq->size)
3505 scrq->cur = 0;
3506 } else {
3507 entry = NULL;
3508 }
3509 spin_unlock_irqrestore(&scrq->lock, flags);
3510
3511 return entry;
3512}
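
/*
 * Typical consumer pattern for the two helpers above (see
 * ibmvnic_complete_tx() earlier in this file): disable the queue irq,
 * drain entries with ibmvnic_next_scrq() while pending_scrq() reports
 * work, re-enable the irq, then check pending_scrq() once more to
 * close the race with an entry that landed between the last poll and
 * the irq enable.
 */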
3513
3514static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3515{
3516 struct ibmvnic_crq_queue *queue = &adapter->crq;
3517 union ibmvnic_crq *crq;
3518
3519 crq = &queue->msgs[queue->cur];
3520 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3521 if (++queue->cur == queue->size)
3522 queue->cur = 0;
3523 } else {
3524 crq = NULL;
3525 }
3526
3527 return crq;
3528}
3529
Thomas Falcon2d14d372018-07-13 12:03:32 -05003530static void print_subcrq_error(struct device *dev, int rc, const char *func)
3531{
3532 switch (rc) {
3533 case H_PARAMETER:
3534 dev_warn_ratelimited(dev,
3535 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3536 func, rc);
3537 break;
3538 case H_CLOSED:
3539 dev_warn_ratelimited(dev,
3540 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3541 func, rc);
3542 break;
3543 default:
3544 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3545 break;
3546 }
3547}
3548
Thomas Falcon032c5e82015-12-21 11:26:06 -06003549static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3550 union sub_crq *sub_crq)
3551{
3552 unsigned int ua = adapter->vdev->unit_address;
3553 struct device *dev = &adapter->vdev->dev;
3554 u64 *u64_crq = (u64 *)sub_crq;
3555 int rc;
3556
3557 netdev_dbg(adapter->netdev,
3558 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3559 (unsigned long int)cpu_to_be64(remote_handle),
3560 (unsigned long int)cpu_to_be64(u64_crq[0]),
3561 (unsigned long int)cpu_to_be64(u64_crq[1]),
3562 (unsigned long int)cpu_to_be64(u64_crq[2]),
3563 (unsigned long int)cpu_to_be64(u64_crq[3]));
3564
3565 /* Make sure the hypervisor sees the complete request */
3566 mb();
3567
3568 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3569 cpu_to_be64(remote_handle),
3570 cpu_to_be64(u64_crq[0]),
3571 cpu_to_be64(u64_crq[1]),
3572 cpu_to_be64(u64_crq[2]),
3573 cpu_to_be64(u64_crq[3]));
3574
Thomas Falcon2d14d372018-07-13 12:03:32 -05003575 if (rc)
3576 print_subcrq_error(dev, rc, __func__);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003577
3578 return rc;
3579}
3580
Thomas Falconad7775d2016-04-01 17:20:34 -05003581static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3582 u64 remote_handle, u64 ioba, u64 num_entries)
3583{
3584 unsigned int ua = adapter->vdev->unit_address;
3585 struct device *dev = &adapter->vdev->dev;
3586 int rc;
3587
3588 /* Make sure the hypervisor sees the complete request */
3589 mb();
3590 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3591 cpu_to_be64(remote_handle),
3592 ioba, num_entries);
3593
Thomas Falcon2d14d372018-07-13 12:03:32 -05003594 if (rc)
3595 print_subcrq_error(dev, rc, __func__);
Thomas Falconad7775d2016-04-01 17:20:34 -05003596
3597 return rc;
3598}
3599
Thomas Falcon032c5e82015-12-21 11:26:06 -06003600static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3601 union ibmvnic_crq *crq)
3602{
3603 unsigned int ua = adapter->vdev->unit_address;
3604 struct device *dev = &adapter->vdev->dev;
3605 u64 *u64_crq = (u64 *)crq;
3606 int rc;
3607
3608 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3609 (unsigned long int)cpu_to_be64(u64_crq[0]),
3610 (unsigned long int)cpu_to_be64(u64_crq[1]));
3611
Thomas Falcon51536982018-05-23 13:37:56 -05003612 if (!adapter->crq.active &&
3613 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3614 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3615 return -EINVAL;
3616 }
3617
Thomas Falcon032c5e82015-12-21 11:26:06 -06003618 /* Make sure the hypervisor sees the complete request */
3619 mb();
3620
3621 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3622 cpu_to_be64(u64_crq[0]),
3623 cpu_to_be64(u64_crq[1]));
3624
3625 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003626 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003627 dev_warn(dev, "CRQ Queue closed\n");
Lijun Panfa68bfa2020-08-19 17:52:24 -05003628 /* do not reset; report the failure and wait for passive init from the server */
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003629 }
3630
Thomas Falcon032c5e82015-12-21 11:26:06 -06003631 dev_warn(dev, "Send error (rc=%d)\n", rc);
3632 }
3633
3634 return rc;
3635}
3636
3637static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3638{
Thomas Falcon36a782f2020-08-31 11:59:57 -05003639 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003640 union ibmvnic_crq crq;
Thomas Falcon36a782f2020-08-31 11:59:57 -05003641 int retries = 100;
3642 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003643
3644 memset(&crq, 0, sizeof(crq));
3645 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3646 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3647 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3648
Thomas Falcon36a782f2020-08-31 11:59:57 -05003649 do {
3650 rc = ibmvnic_send_crq(adapter, &crq);
3651 if (rc != H_CLOSED)
3652 break;
3653 retries--;
3654 msleep(50);
3655
3656 } while (retries > 0);
3657
3658 if (rc) {
3659 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3660 return rc;
3661 }
3662
3663 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003664}
3665
Thomas Falcon032c5e82015-12-21 11:26:06 -06003666static int send_version_xchg(struct ibmvnic_adapter *adapter)
3667{
3668 union ibmvnic_crq crq;
3669
3670 memset(&crq, 0, sizeof(crq));
3671 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3672 crq.version_exchange.cmd = VERSION_EXCHANGE;
3673 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3674
3675 return ibmvnic_send_crq(adapter, &crq);
3676}
3677
Nathan Fontenot37798d02017-11-08 11:23:56 -06003678struct vnic_login_client_data {
3679 u8 type;
3680 __be16 len;
Kees Cook08ea5562018-04-10 15:26:43 -07003681 char name[];
Nathan Fontenot37798d02017-11-08 11:23:56 -06003682} __packed;
3683
3684static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3685{
3686 int len;
3687
3688 /* Calculate the amount of buffer space needed for the
3689 * vnic client data in the login buffer. There are four entries:
3690 * OS name, LPAR name, device name, and a null last entry.
3691 */
3692 len = 4 * sizeof(struct vnic_login_client_data);
3693 len += 6; /* "Linux" plus NULL */
3694 len += strlen(utsname()->nodename) + 1;
3695 len += strlen(adapter->netdev->name) + 1;
3696
3697 return len;
3698}
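
/*
 * Worked example with hypothetical names: the __packed
 * vnic_login_client_data header is 3 bytes (u8 type + __be16 len), so
 * for nodename "lpar1" and netdev name "eth0" this returns
 * 4 * 3 + 6 ("Linux\0") + 6 ("lpar1\0") + 5 ("eth0\0") = 29 bytes.
 */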
3699
3700static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3701 struct vnic_login_client_data *vlcd)
3702{
3703 const char *os_name = "Linux";
3704 int len;
3705
3706 /* Type 1 - LPAR OS */
3707 vlcd->type = 1;
3708 len = strlen(os_name) + 1;
3709 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003710 strncpy(vlcd->name, os_name, len);
3711 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003712
3713 /* Type 2 - LPAR name */
3714 vlcd->type = 2;
3715 len = strlen(utsname()->nodename) + 1;
3716 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003717 strncpy(vlcd->name, utsname()->nodename, len);
3718 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003719
3720 /* Type 3 - device name */
3721 vlcd->type = 3;
3722 len = strlen(adapter->netdev->name) + 1;
3723 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003724 strncpy(vlcd->name, adapter->netdev->name, len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003725}
3726
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003727static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003728{
3729 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3730 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003731 struct device *dev = &adapter->vdev->dev;
3732 dma_addr_t rsp_buffer_token;
3733 dma_addr_t buffer_token;
3734 size_t rsp_buffer_size;
3735 union ibmvnic_crq crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003736 size_t buffer_size;
3737 __be64 *tx_list_p;
3738 __be64 *rx_list_p;
Nathan Fontenot37798d02017-11-08 11:23:56 -06003739 int client_data_len;
3740 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003741 int i;
3742
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003743 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3744 netdev_err(adapter->netdev,
3745 "RX or TX queues are not allocated, device login failed\n");
3746 return -1;
3747 }
3748
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06003749 release_login_rsp_buffer(adapter);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003750 client_data_len = vnic_client_data_len(adapter);
3751
Thomas Falcon032c5e82015-12-21 11:26:06 -06003752 buffer_size =
3753 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06003754 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3755 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003756
Nathan Fontenot37798d02017-11-08 11:23:56 -06003757 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003758 if (!login_buffer)
3759 goto buf_alloc_failed;
3760
3761 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3762 DMA_TO_DEVICE);
3763 if (dma_mapping_error(dev, buffer_token)) {
3764 dev_err(dev, "Couldn't map login buffer\n");
3765 goto buf_map_failed;
3766 }
3767
John Allen498cd8e2016-04-06 11:49:55 -05003768 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3769 sizeof(u64) * adapter->req_tx_queues +
3770 sizeof(u64) * adapter->req_rx_queues +
3771 sizeof(u64) * adapter->req_rx_queues +
3772 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003773
3774 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3775 if (!login_rsp_buffer)
3776 goto buf_rsp_alloc_failed;
3777
3778 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3779 rsp_buffer_size, DMA_FROM_DEVICE);
3780 if (dma_mapping_error(dev, rsp_buffer_token)) {
3781 dev_err(dev, "Couldn't map login rsp buffer\n");
3782 goto buf_rsp_map_failed;
3783 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04003784
Thomas Falcon032c5e82015-12-21 11:26:06 -06003785 adapter->login_buf = login_buffer;
3786 adapter->login_buf_token = buffer_token;
3787 adapter->login_buf_sz = buffer_size;
3788 adapter->login_rsp_buf = login_rsp_buffer;
3789 adapter->login_rsp_buf_token = rsp_buffer_token;
3790 adapter->login_rsp_buf_sz = rsp_buffer_size;
3791
3792 login_buffer->len = cpu_to_be32(buffer_size);
3793 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3794 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3795 login_buffer->off_txcomp_subcrqs =
3796 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3797 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3798 login_buffer->off_rxcomp_subcrqs =
3799 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3800 sizeof(u64) * adapter->req_tx_queues);
3801 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3802 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3803
3804 tx_list_p = (__be64 *)((char *)login_buffer +
3805 sizeof(struct ibmvnic_login_buffer));
3806 rx_list_p = (__be64 *)((char *)login_buffer +
3807 sizeof(struct ibmvnic_login_buffer) +
3808 sizeof(u64) * adapter->req_tx_queues);
3809
3810 for (i = 0; i < adapter->req_tx_queues; i++) {
3811 if (adapter->tx_scrq[i]) {
3812 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3813 crq_num);
3814 }
3815 }
3816
3817 for (i = 0; i < adapter->req_rx_queues; i++) {
3818 if (adapter->rx_scrq[i]) {
3819 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3820 crq_num);
3821 }
3822 }
3823
Nathan Fontenot37798d02017-11-08 11:23:56 -06003824 /* Insert vNIC login client data */
3825 vlcd = (struct vnic_login_client_data *)
3826 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3827 login_buffer->client_data_offset =
3828 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3829 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3830
3831 vnic_add_client_data(adapter, vlcd);
3832
Thomas Falcon032c5e82015-12-21 11:26:06 -06003833 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3834 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3835 netdev_dbg(adapter->netdev, "%016lx\n",
3836 ((unsigned long int *)(adapter->login_buf))[i]);
3837 }
3838
3839 memset(&crq, 0, sizeof(crq));
3840 crq.login.first = IBMVNIC_CRQ_CMD;
3841 crq.login.cmd = LOGIN;
3842 crq.login.ioba = cpu_to_be32(buffer_token);
3843 crq.login.len = cpu_to_be32(buffer_size);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003844 ibmvnic_send_crq(adapter, &crq);
3845
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003846 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003847
Thomas Falcon032c5e82015-12-21 11:26:06 -06003848buf_rsp_map_failed:
3849 kfree(login_rsp_buffer);
3850buf_rsp_alloc_failed:
3851 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3852buf_map_failed:
3853 kfree(login_buffer);
3854buf_alloc_failed:
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003855 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003856}
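
/*
 * Resulting login buffer layout as assembled above (all offsets are
 * relative to the start of login_buffer):
 *
 *   struct ibmvnic_login_buffer header
 *   u64 tx sub-CRQ numbers, req_tx_queues entries  <- off_txcomp_subcrqs
 *   u64 rx sub-CRQ numbers, req_rx_queues entries  <- off_rxcomp_subcrqs
 *   client data: type 1 OS name, type 2 LPAR name, <- client_data_offset
 *                type 3 device name
 *
 * A zeroed terminator entry follows the client data; space for it is
 * reserved in vnic_client_data_len() and the buffer is kzalloc'd.
 */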
3857
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003858static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3859 u32 len, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003860{
3861 union ibmvnic_crq crq;
3862
3863 memset(&crq, 0, sizeof(crq));
3864 crq.request_map.first = IBMVNIC_CRQ_CMD;
3865 crq.request_map.cmd = REQUEST_MAP;
3866 crq.request_map.map_id = map_id;
3867 crq.request_map.ioba = cpu_to_be32(addr);
3868 crq.request_map.len = cpu_to_be32(len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003869 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003870}
3871
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003872static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003873{
3874 union ibmvnic_crq crq;
3875
3876 memset(&crq, 0, sizeof(crq));
3877 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3878 crq.request_unmap.cmd = REQUEST_UNMAP;
3879 crq.request_unmap.map_id = map_id;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003880 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003881}
3882
Lijun Pan69980d02020-09-27 20:13:28 -05003883static void send_query_map(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003884{
3885 union ibmvnic_crq crq;
3886
3887 memset(&crq, 0, sizeof(crq));
3888 crq.query_map.first = IBMVNIC_CRQ_CMD;
3889 crq.query_map.cmd = QUERY_MAP;
3890 ibmvnic_send_crq(adapter, &crq);
3891}
3892
3893/* Send a series of CRQs requesting various capabilities of the VNIC server */
Lijun Pan491099a2020-09-27 20:13:26 -05003894static void send_query_cap(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003895{
3896 union ibmvnic_crq crq;
3897
Thomas Falcon901e0402017-02-15 12:17:59 -06003898 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003899 memset(&crq, 0, sizeof(crq));
3900 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3901 crq.query_capability.cmd = QUERY_CAPABILITY;
3902
3903 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003904 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003905 ibmvnic_send_crq(adapter, &crq);
3906
3907 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003908 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003909 ibmvnic_send_crq(adapter, &crq);
3910
3911 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003912 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003913 ibmvnic_send_crq(adapter, &crq);
3914
3915 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003916 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003917 ibmvnic_send_crq(adapter, &crq);
3918
3919 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003920 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003921 ibmvnic_send_crq(adapter, &crq);
3922
3923 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003924 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003925 ibmvnic_send_crq(adapter, &crq);
3926
3927 crq.query_capability.capability =
3928 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003929 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003930 ibmvnic_send_crq(adapter, &crq);
3931
3932 crq.query_capability.capability =
3933 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003934 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003935 ibmvnic_send_crq(adapter, &crq);
3936
3937 crq.query_capability.capability =
3938 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003939 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003940 ibmvnic_send_crq(adapter, &crq);
3941
3942 crq.query_capability.capability =
3943 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003944 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003945 ibmvnic_send_crq(adapter, &crq);
3946
3947 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06003948 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003949 ibmvnic_send_crq(adapter, &crq);
3950
3951 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003952 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003953 ibmvnic_send_crq(adapter, &crq);
3954
3955 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003956 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003957 ibmvnic_send_crq(adapter, &crq);
3958
3959 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003960 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003961 ibmvnic_send_crq(adapter, &crq);
3962
3963 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06003964 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003965 ibmvnic_send_crq(adapter, &crq);
3966
3967 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06003968 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003969 ibmvnic_send_crq(adapter, &crq);
3970
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04003971 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3972 atomic_inc(&adapter->running_cap_crqs);
3973 ibmvnic_send_crq(adapter, &crq);
3974
Thomas Falcon032c5e82015-12-21 11:26:06 -06003975 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003976 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003977 ibmvnic_send_crq(adapter, &crq);
3978
3979 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003980 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003981 ibmvnic_send_crq(adapter, &crq);
3982
3983 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003984 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003985 ibmvnic_send_crq(adapter, &crq);
3986
3987 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003988 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003989 ibmvnic_send_crq(adapter, &crq);
3990
3991 crq.query_capability.capability =
3992 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06003993 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003994 ibmvnic_send_crq(adapter, &crq);
3995
3996 crq.query_capability.capability =
3997 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003998 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003999 ibmvnic_send_crq(adapter, &crq);
4000
4001 crq.query_capability.capability =
4002 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004003 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004004 ibmvnic_send_crq(adapter, &crq);
4005
4006 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004007 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004008 ibmvnic_send_crq(adapter, &crq);
4009}
4010
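/* Ask the server for its TCP/IP offload capabilities. The response is
 * DMA'd straight into adapter->ip_offload_buf, which is mapped here and
 * unmapped again in handle_query_ip_offload_rsp().
 */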
Lijun Pan16e811f2020-09-27 20:13:29 -05004011static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4012{
4013 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4014 struct device *dev = &adapter->vdev->dev;
4015 union ibmvnic_crq crq;
4016
4017 adapter->ip_offload_tok =
4018 dma_map_single(dev,
4019 &adapter->ip_offload_buf,
4020 buf_sz,
4021 DMA_FROM_DEVICE);
4022
4023 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4024 if (!firmware_has_feature(FW_FEATURE_CMO))
4025 dev_err(dev, "Couldn't map offload buffer\n");
4026 return;
4027 }
4028
4029 memset(&crq, 0, sizeof(crq));
4030 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4031 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4032 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4033 crq.query_ip_offload.ioba =
4034 cpu_to_be32(adapter->ip_offload_tok);
4035
4036 ibmvnic_send_crq(adapter, &crq);
4037}
4038
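/* Tell the server which of the offloads it advertised the driver will
 * actually use, and bring the netdev feature flags in line with what
 * was negotiated.
 */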
Lijun Pan46899bd2020-09-27 20:13:30 -05004039static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4040{
4041 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4042 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4043 struct device *dev = &adapter->vdev->dev;
4044 netdev_features_t old_hw_features = 0;
4045 union ibmvnic_crq crq;
4046
4047 adapter->ip_offload_ctrl_tok =
4048 dma_map_single(dev,
4049 ctrl_buf,
4050 sizeof(adapter->ip_offload_ctrl),
4051 DMA_TO_DEVICE);
4052
4053 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4054 dev_err(dev, "Couldn't map ip offload control buffer\n");
4055 return;
4056 }
4057
4058 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4059 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4060 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4061 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4062 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4063 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4064 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4065 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4066 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4067 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4068
4069 /* large_rx disabled for now, additional features needed */
4070 ctrl_buf->large_rx_ipv4 = 0;
4071 ctrl_buf->large_rx_ipv6 = 0;
4072
4073 if (adapter->state != VNIC_PROBING) {
4074 old_hw_features = adapter->netdev->hw_features;
4075 adapter->netdev->hw_features = 0;
4076 }
4077
4078 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4079
4080 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4081 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4082
4083 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4084 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4085
4086 if ((adapter->netdev->features &
4087 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4088 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4089
4090 if (buf->large_tx_ipv4)
4091 adapter->netdev->hw_features |= NETIF_F_TSO;
4092 if (buf->large_tx_ipv6)
4093 adapter->netdev->hw_features |= NETIF_F_TSO6;
4094
4095 if (adapter->state == VNIC_PROBING) {
4096 adapter->netdev->features |= adapter->netdev->hw_features;
4097 } else if (old_hw_features != adapter->netdev->hw_features) {
4098 netdev_features_t tmp = 0;
4099
4100 /* disable features no longer supported */
4101 adapter->netdev->features &= adapter->netdev->hw_features;
4102 /* turn on features now supported if previously enabled */
4103 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4104 adapter->netdev->hw_features;
4105 adapter->netdev->features |=
4106 tmp & adapter->netdev->wanted_features;
4107 }
4108
4109 memset(&crq, 0, sizeof(crq));
4110 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4111 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4112 crq.control_ip_offload.len =
4113 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4114 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4115 ibmvnic_send_crq(adapter, &crq);
4116}
4117
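/* GET_VPD_SIZE response handler: record how large a buffer the
 * following GET_VPD request needs, then wake the waiter on fw_done.
 */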
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004118static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4119 struct ibmvnic_adapter *adapter)
4120{
4121 struct device *dev = &adapter->vdev->dev;
4122
4123 if (crq->get_vpd_size_rsp.rc.code) {
4124 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4125 crq->get_vpd_size_rsp.rc.code);
4126 complete(&adapter->fw_done);
4127 return;
4128 }
4129
4130 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4131 complete(&adapter->fw_done);
4132}
4133
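/* GET_VPD response handler: pull the firmware level out of the VPD
 * data, where it is encoded as the ASCII tag 'RM' followed by a
 * one-byte length and the level string itself.
 */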
4134static void handle_vpd_rsp(union ibmvnic_crq *crq,
4135 struct ibmvnic_adapter *adapter)
4136{
4137 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004138 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004139 u8 fw_level_len = 0;
4140
4141 memset(adapter->fw_version, 0, 32);
4142
4143 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4144 DMA_FROM_DEVICE);
4145
4146 if (crq->get_vpd_rsp.rc.code) {
4147 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4148 crq->get_vpd_rsp.rc.code);
4149 goto complete;
4150 }
4151
4152 /* get the position of the firmware version info
4153 * located after the ASCII 'RM' substring in the buffer
4154 */
4155 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4156 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02004157 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004158 goto complete;
4159 }
4160
4161 /* get length of firmware level ASCII substring */
4162 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4163 fw_level_len = *(substr + 2);
4164 } else {
4165		dev_info(dev, "Length of FW substr extends beyond VPD buff\n");
4166 goto complete;
4167 }
4168
4169 /* copy firmware version string from vpd into adapter */
4170 if ((substr + 3 + fw_level_len) <
4171 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004172		snprintf((char *)adapter->fw_version, 32, "%.*s", fw_level_len, substr + 3);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004173 } else {
4174		dev_info(dev, "FW substr extends beyond VPD buff\n");
4175 }
4176
4177complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004178 if (adapter->fw_version[0] == '\0')
4179		strncpy((char *)adapter->fw_version, "N/A", 3);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004180 complete(&adapter->fw_done);
4181}
4182
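/* QUERY_IP_OFFLOAD response handler: dump what the server reported,
 * then send CONTROL_IP_OFFLOAD to select the offloads to enable.
 */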
Thomas Falcon032c5e82015-12-21 11:26:06 -06004183static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4184{
4185 struct device *dev = &adapter->vdev->dev;
4186 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004187 int i;
4188
4189 dma_unmap_single(dev, adapter->ip_offload_tok,
4190 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4191
4192 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4193 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4194 netdev_dbg(adapter->netdev, "%016lx\n",
4195			   ((unsigned long *)(buf))[i]);
4196
4197 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4198 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4199 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4200 buf->tcp_ipv4_chksum);
4201 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4202 buf->tcp_ipv6_chksum);
4203 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4204 buf->udp_ipv4_chksum);
4205 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4206 buf->udp_ipv6_chksum);
4207 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4208 buf->large_tx_ipv4);
4209 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4210 buf->large_tx_ipv6);
4211 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4212 buf->large_rx_ipv4);
4213 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4214 buf->large_rx_ipv6);
4215 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4216 buf->max_ipv4_header_size);
4217 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4218 buf->max_ipv6_header_size);
4219 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4220 buf->max_tcp_header_size);
4221 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4222 buf->max_udp_header_size);
4223 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4224 buf->max_large_tx_size);
4225 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4226 buf->max_large_rx_size);
4227 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4228 buf->ipv6_extension_header);
4229 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4230 buf->tcp_pseudosum_req);
4231 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4232 buf->num_ipv6_ext_headers);
4233 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4234 buf->off_ipv6_ext_headers);
4235
Lijun Pan46899bd2020-09-27 20:13:30 -05004236 send_control_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004237}
4238
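/* Translate a firmware error cause code into the human-readable string
 * used by handle_error_indication() below.
 */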
Thomas Falconc9008d32018-08-06 21:39:59 -05004239static const char *ibmvnic_fw_err_cause(u16 cause)
4240{
4241 switch (cause) {
4242 case ADAPTER_PROBLEM:
4243 return "adapter problem";
4244 case BUS_PROBLEM:
4245 return "bus problem";
4246 case FW_PROBLEM:
4247 return "firmware problem";
4248 case DD_PROBLEM:
4249 return "device driver problem";
4250 case EEH_RECOVERY:
4251 return "EEH recovery";
4252 case FW_UPDATED:
4253 return "firmware updated";
4254 case LOW_MEMORY:
4255		return "low memory";
4256 default:
4257 return "unknown";
4258 }
4259}
4260
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004261static void handle_error_indication(union ibmvnic_crq *crq,
4262 struct ibmvnic_adapter *adapter)
4263{
4264 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004265 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004266
Thomas Falconc9008d32018-08-06 21:39:59 -05004267 cause = be16_to_cpu(crq->error_indication.error_cause);
4268
4269 dev_warn_ratelimited(dev,
4270 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4271 crq->error_indication.flags
4272 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4273 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004274
Nathan Fontenoted651a12017-05-03 14:04:38 -04004275 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4276 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004277 else
4278 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004279}
4280
Thomas Falconf8136142018-01-29 13:45:05 -06004281static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4282 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004283{
4284 struct net_device *netdev = adapter->netdev;
4285 struct device *dev = &adapter->vdev->dev;
4286 long rc;
4287
4288 rc = crq->change_mac_addr_rsp.rc.code;
4289 if (rc) {
4290 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004291 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004292 }
Lijun Pand9b0e592020-10-20 17:39:19 -05004293 /* crq->change_mac_addr.mac_addr is the requested one
4294 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4295 */
Thomas Falcon62740e92019-05-09 23:13:43 -05004296 ether_addr_copy(netdev->dev_addr,
4297 &crq->change_mac_addr_rsp.mac_addr[0]);
Lijun Pand9b0e592020-10-20 17:39:19 -05004298 ether_addr_copy(adapter->mac_addr,
4299 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004300out:
4301 complete(&adapter->fw_done);
4302 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004303}
4304
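/* REQUEST_CAPABILITY response handler. On PARTIALSUCCESS the server
 * suggests a value it can support; adopt it (or the fallback MTU) and
 * renegotiate. Once the last outstanding response arrives, move on to
 * querying IP offload support.
 */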
4305static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4306 struct ibmvnic_adapter *adapter)
4307{
4308 struct device *dev = &adapter->vdev->dev;
4309 u64 *req_value;
4310 char *name;
4311
Thomas Falcon901e0402017-02-15 12:17:59 -06004312 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004313 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4314 case REQ_TX_QUEUES:
4315 req_value = &adapter->req_tx_queues;
4316 name = "tx";
4317 break;
4318 case REQ_RX_QUEUES:
4319 req_value = &adapter->req_rx_queues;
4320 name = "rx";
4321 break;
4322 case REQ_RX_ADD_QUEUES:
4323 req_value = &adapter->req_rx_add_queues;
4324 name = "rx_add";
4325 break;
4326 case REQ_TX_ENTRIES_PER_SUBCRQ:
4327 req_value = &adapter->req_tx_entries_per_subcrq;
4328 name = "tx_entries_per_subcrq";
4329 break;
4330 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4331 req_value = &adapter->req_rx_add_entries_per_subcrq;
4332 name = "rx_add_entries_per_subcrq";
4333 break;
4334 case REQ_MTU:
4335 req_value = &adapter->req_mtu;
4336 name = "mtu";
4337 break;
4338 case PROMISC_REQUESTED:
4339 req_value = &adapter->promisc;
4340 name = "promisc";
4341 break;
4342 default:
4343 dev_err(dev, "Got invalid cap request rsp %d\n",
4344			be16_to_cpu(crq->request_capability.capability));
4345 return;
4346 }
4347
4348 switch (crq->request_capability_rsp.rc.code) {
4349 case SUCCESS:
4350 break;
4351 case PARTIALSUCCESS:
4352 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4353 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06004354			 (long)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06004355 number), name);
John Allene7913802018-01-18 16:27:12 -06004356
4357 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4358 REQ_MTU) {
4359 pr_err("mtu of %llu is not supported. Reverting.\n",
4360 *req_value);
4361 *req_value = adapter->fallback.mtu;
4362 } else {
4363 *req_value =
4364 be64_to_cpu(crq->request_capability_rsp.number);
4365 }
4366
Lijun Pan09081b92020-09-27 20:13:27 -05004367 send_request_cap(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004368 return;
4369 default:
4370 dev_err(dev, "Error %d in request cap rsp\n",
4371 crq->request_capability_rsp.rc.code);
4372 return;
4373 }
4374
4375 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004376 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon249168a2017-02-15 12:18:00 -06004377 adapter->wait_capability = false;
Lijun Pan16e811f2020-09-27 20:13:29 -05004378 send_query_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004379 }
4380}
4381
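/* LOGIN response handler: sanity-check the response against the login
 * request, record the negotiated rx buffer size and the tx/rx sub-CRQ
 * handles, then complete init_done for the thread driving the login.
 */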
4382static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4383 struct ibmvnic_adapter *adapter)
4384{
4385 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004386 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004387 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4388 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004389 u64 *tx_handle_array;
4390 u64 *rx_handle_array;
4391 int num_tx_pools;
4392 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004393 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004394 int i;
4395
4396 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004397 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004398 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004399 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004400
John Allen498cd8e2016-04-06 11:49:55 -05004401 /* If the number of queues requested can't be allocated by the
4402 * server, the login response will return with code 1. We will need
4403 * to resend the login buffer with fewer queues requested.
4404 */
4405 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004406 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004407 complete(&adapter->init_done);
4408 return 0;
4409 }
4410
John Allenc26eba02017-10-26 16:23:25 -05004411 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4412
Thomas Falcon032c5e82015-12-21 11:26:06 -06004413 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4414 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4415 netdev_dbg(adapter->netdev, "%016lx\n",
4416			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
4417 }
4418
4419 /* Sanity checks */
4420 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4421 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4422 adapter->req_rx_add_queues !=
4423 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4424 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
Dany Madden31d6b402020-11-25 18:04:24 -06004425 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004426 return -EIO;
4427 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004428 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4429 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4430 /* variable buffer sizes are not supported, so just read the
4431 * first entry.
4432 */
4433 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004434
4435 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4436 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4437
4438 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4439 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4440 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4441 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4442
4443 for (i = 0; i < num_tx_pools; i++)
4444 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4445
4446 for (i = 0; i < num_rx_pools; i++)
4447 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4448
Thomas Falcon507ebe62020-08-21 13:39:01 -05004449 adapter->num_active_tx_scrqs = num_tx_pools;
4450 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004451 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004452 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004453 complete(&adapter->init_done);
4454
Thomas Falcon032c5e82015-12-21 11:26:06 -06004455 return 0;
4456}
4457
Thomas Falcon032c5e82015-12-21 11:26:06 -06004458static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4459 struct ibmvnic_adapter *adapter)
4460{
4461 struct device *dev = &adapter->vdev->dev;
4462 long rc;
4463
4464 rc = crq->request_unmap_rsp.rc.code;
4465 if (rc)
4466 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4467}
4468
4469static void handle_query_map_rsp(union ibmvnic_crq *crq,
4470 struct ibmvnic_adapter *adapter)
4471{
4472 struct net_device *netdev = adapter->netdev;
4473 struct device *dev = &adapter->vdev->dev;
4474 long rc;
4475
4476 rc = crq->query_map_rsp.rc.code;
4477 if (rc) {
4478 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4479 return;
4480 }
4481 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4482 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4483 crq->query_map_rsp.free_pages);
4484}
4485
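/* QUERY_CAPABILITY response handler. Each response carries a single
 * capability value; when the last outstanding query is answered, send
 * the REQUEST_CAPABILITY messages based on what was learned.
 */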
4486static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4487 struct ibmvnic_adapter *adapter)
4488{
4489 struct net_device *netdev = adapter->netdev;
4490 struct device *dev = &adapter->vdev->dev;
4491 long rc;
4492
Thomas Falcon901e0402017-02-15 12:17:59 -06004493 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004494 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004495 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004496 rc = crq->query_capability.rc.code;
4497 if (rc) {
4498 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4499 goto out;
4500 }
4501
4502 switch (be16_to_cpu(crq->query_capability.capability)) {
4503 case MIN_TX_QUEUES:
4504 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004505 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004506 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4507 adapter->min_tx_queues);
4508 break;
4509 case MIN_RX_QUEUES:
4510 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004511 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004512 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4513 adapter->min_rx_queues);
4514 break;
4515 case MIN_RX_ADD_QUEUES:
4516 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004517 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004518 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4519 adapter->min_rx_add_queues);
4520 break;
4521 case MAX_TX_QUEUES:
4522 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004523 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004524 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4525 adapter->max_tx_queues);
4526 break;
4527 case MAX_RX_QUEUES:
4528 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004529 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004530 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4531 adapter->max_rx_queues);
4532 break;
4533 case MAX_RX_ADD_QUEUES:
4534 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004535 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004536 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4537 adapter->max_rx_add_queues);
4538 break;
4539 case MIN_TX_ENTRIES_PER_SUBCRQ:
4540 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004541 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004542 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4543 adapter->min_tx_entries_per_subcrq);
4544 break;
4545 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4546 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004547 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004548 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4549 adapter->min_rx_add_entries_per_subcrq);
4550 break;
4551 case MAX_TX_ENTRIES_PER_SUBCRQ:
4552 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004553 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004554 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4555 adapter->max_tx_entries_per_subcrq);
4556 break;
4557 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4558 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004559 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004560 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4561 adapter->max_rx_add_entries_per_subcrq);
4562 break;
4563 case TCP_IP_OFFLOAD:
4564 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06004565 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004566 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4567 adapter->tcp_ip_offload);
4568 break;
4569 case PROMISC_SUPPORTED:
4570 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004571 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004572 netdev_dbg(netdev, "promisc_supported = %lld\n",
4573 adapter->promisc_supported);
4574 break;
4575 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004576 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004577 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004578 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4579 break;
4580 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004581 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004582 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004583 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4584 break;
4585 case MAX_MULTICAST_FILTERS:
4586 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004587 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004588 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4589 adapter->max_multicast_filters);
4590 break;
4591 case VLAN_HEADER_INSERTION:
4592 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004593 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004594 if (adapter->vlan_header_insertion)
4595 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4596 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4597 adapter->vlan_header_insertion);
4598 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004599 case RX_VLAN_HEADER_INSERTION:
4600 adapter->rx_vlan_header_insertion =
4601 be64_to_cpu(crq->query_capability.number);
4602 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4603 adapter->rx_vlan_header_insertion);
4604 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004605 case MAX_TX_SG_ENTRIES:
4606 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004607 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004608 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4609 adapter->max_tx_sg_entries);
4610 break;
4611 case RX_SG_SUPPORTED:
4612 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004613 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004614 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4615 adapter->rx_sg_supported);
4616 break;
4617 case OPT_TX_COMP_SUB_QUEUES:
4618 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004619 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004620 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4621 adapter->opt_tx_comp_sub_queues);
4622 break;
4623 case OPT_RX_COMP_QUEUES:
4624 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004625 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004626 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4627 adapter->opt_rx_comp_queues);
4628 break;
4629 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4630 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004631 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004632 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4633 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4634 break;
4635 case OPT_TX_ENTRIES_PER_SUBCRQ:
4636 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004637 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004638 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4639 adapter->opt_tx_entries_per_subcrq);
4640 break;
4641 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4642 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004643 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004644 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4645 adapter->opt_rxba_entries_per_subcrq);
4646 break;
4647 case TX_RX_DESC_REQ:
4648 adapter->tx_rx_desc_req = crq->query_capability.number;
4649 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4650 adapter->tx_rx_desc_req);
4651 break;
4652
4653 default:
4654 netdev_err(netdev, "Got invalid cap rsp %d\n",
4655			   be16_to_cpu(crq->query_capability.capability));
4656 }
4657
4658out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004659 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4660 adapter->wait_capability = false;
Lijun Pan09081b92020-09-27 20:13:27 -05004661 send_request_cap(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004662 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004663}
4664
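/* Synchronously query the physical port parameters (speed and duplex).
 * fw_lock is held across the exchange so concurrent users of the
 * shared fw_done completion cannot race.
 */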
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004665static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4666{
4667 union ibmvnic_crq crq;
4668 int rc;
4669
4670 memset(&crq, 0, sizeof(crq));
4671 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4672 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004673
4674 mutex_lock(&adapter->fw_lock);
4675 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06004676 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004677
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004678 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004679 if (rc) {
4680 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004681 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004682 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004683
4684 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004685 if (rc) {
4686 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06004687 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004688 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004689
Thomas Falconff25dcb2019-11-25 17:12:56 -06004690 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004691 return adapter->fw_done_rc ? -EIO : 0;
4692}
4693
4694static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4695 struct ibmvnic_adapter *adapter)
4696{
4697 struct net_device *netdev = adapter->netdev;
4698 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004699 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004700
4701 rc = crq->query_phys_parms_rsp.rc.code;
4702 if (rc) {
4703 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4704 return rc;
4705 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004706 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004707 case IBMVNIC_10MBPS:
4708 adapter->speed = SPEED_10;
4709 break;
4710 case IBMVNIC_100MBPS:
4711 adapter->speed = SPEED_100;
4712 break;
4713 case IBMVNIC_1GBPS:
4714 adapter->speed = SPEED_1000;
4715 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05004716 case IBMVNIC_10GBPS:
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004717 adapter->speed = SPEED_10000;
4718 break;
4719 case IBMVNIC_25GBPS:
4720 adapter->speed = SPEED_25000;
4721 break;
4722 case IBMVNIC_40GBPS:
4723 adapter->speed = SPEED_40000;
4724 break;
4725 case IBMVNIC_50GBPS:
4726 adapter->speed = SPEED_50000;
4727 break;
4728 case IBMVNIC_100GBPS:
4729 adapter->speed = SPEED_100000;
4730 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05004731 case IBMVNIC_200GBPS:
4732 adapter->speed = SPEED_200000;
4733 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004734 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004735 if (netif_carrier_ok(netdev))
4736 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004737 adapter->speed = SPEED_UNKNOWN;
4738 }
4739 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4740 adapter->duplex = DUPLEX_FULL;
4741 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4742 adapter->duplex = DUPLEX_HALF;
4743 else
4744 adapter->duplex = DUPLEX_UNKNOWN;
4745
4746 return rc;
4747}
4748
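/* Dispatch one CRQ message. Transport events (partner initialization,
 * failover, migration) are handled inline; command responses are fanned
 * out to the per-command handlers above.
 */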
Thomas Falcon032c5e82015-12-21 11:26:06 -06004749static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4750 struct ibmvnic_adapter *adapter)
4751{
4752 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4753 struct net_device *netdev = adapter->netdev;
4754 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004755 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004756 long rc;
4757
4758 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004759		   (unsigned long)be64_to_cpu(u64_crq[0]),
4760		   (unsigned long)be64_to_cpu(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004761 switch (gen_crq->first) {
4762 case IBMVNIC_CRQ_INIT_RSP:
4763 switch (gen_crq->cmd) {
4764 case IBMVNIC_CRQ_INIT:
4765 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04004766 adapter->from_passive_init = true;
Thomas Falcon17c87052018-05-23 13:37:58 -05004767 if (!completion_done(&adapter->init_done)) {
4768 complete(&adapter->init_done);
4769 adapter->init_done_rc = -EIO;
4770 }
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004771 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004772 break;
4773 case IBMVNIC_CRQ_INIT_COMPLETE:
4774 dev_info(dev, "Partner initialization complete\n");
Thomas Falcon51536982018-05-23 13:37:56 -05004775 adapter->crq.active = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004776 send_version_xchg(adapter);
4777 break;
4778 default:
4779 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4780 }
4781 return;
4782 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04004783 netif_carrier_off(netdev);
Thomas Falcon51536982018-05-23 13:37:56 -05004784 adapter->crq.active = false;
Thomas Falcon2147e3d2019-11-25 17:12:54 -06004785 /* terminate any thread waiting for a response
4786 * from the device
4787 */
4788 if (!completion_done(&adapter->fw_done)) {
4789 adapter->fw_done_rc = -EIO;
4790 complete(&adapter->fw_done);
4791 }
4792 if (!completion_done(&adapter->stats_done))
4793 complete(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04004794 if (test_bit(0, &adapter->resetting))
Thomas Falcon2770a792018-05-23 13:38:02 -05004795 adapter->force_reset_recovery = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004796 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04004797 dev_info(dev, "Migrated, re-enabling adapter\n");
4798 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05004799 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4800 dev_info(dev, "Backing device failover detected\n");
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004801 adapter->failover_pending = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004802 } else {
4803 /* The adapter lost the connection */
4804 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4805 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04004806 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004807 }
4808 return;
4809 case IBMVNIC_CRQ_CMD_RSP:
4810 break;
4811 default:
4812 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4813 gen_crq->first);
4814 return;
4815 }
4816
4817 switch (gen_crq->cmd) {
4818 case VERSION_EXCHANGE_RSP:
4819 rc = crq->version_exchange_rsp.rc.code;
4820 if (rc) {
4821 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4822 break;
4823 }
Thomas Falcon78468892020-05-28 11:19:17 -05004824 ibmvnic_version =
Thomas Falcon032c5e82015-12-21 11:26:06 -06004825 be16_to_cpu(crq->version_exchange_rsp.version);
Thomas Falcon78468892020-05-28 11:19:17 -05004826 dev_info(dev, "Partner protocol version is %d\n",
4827 ibmvnic_version);
Lijun Pan491099a2020-09-27 20:13:26 -05004828 send_query_cap(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004829 break;
4830 case QUERY_CAPABILITY_RSP:
4831 handle_query_cap_rsp(crq, adapter);
4832 break;
4833 case QUERY_MAP_RSP:
4834 handle_query_map_rsp(crq, adapter);
4835 break;
4836 case REQUEST_MAP_RSP:
Thomas Falconf3be0cb2017-06-21 14:53:01 -05004837 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4838 complete(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004839 break;
4840 case REQUEST_UNMAP_RSP:
4841 handle_request_unmap_rsp(crq, adapter);
4842 break;
4843 case REQUEST_CAPABILITY_RSP:
4844 handle_request_cap_rsp(crq, adapter);
4845 break;
4846 case LOGIN_RSP:
4847 netdev_dbg(netdev, "Got Login Response\n");
4848 handle_login_rsp(crq, adapter);
4849 break;
4850 case LOGICAL_LINK_STATE_RSP:
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004851 netdev_dbg(netdev,
4852 "Got Logical Link State Response, state: %d rc: %d\n",
4853 crq->logical_link_state_rsp.link_state,
4854 crq->logical_link_state_rsp.rc.code);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004855 adapter->logical_link_state =
4856 crq->logical_link_state_rsp.link_state;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004857 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4858 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004859 break;
4860 case LINK_STATE_INDICATION:
4861 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4862 adapter->phys_link_state =
4863 crq->link_state_indication.phys_link_state;
4864 adapter->logical_link_state =
4865 crq->link_state_indication.logical_link_state;
Thomas Falcon0655f992019-05-09 23:13:44 -05004866 if (adapter->phys_link_state && adapter->logical_link_state)
4867 netif_carrier_on(netdev);
4868 else
4869 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004870 break;
4871 case CHANGE_MAC_ADDR_RSP:
4872 netdev_dbg(netdev, "Got MAC address change Response\n");
Thomas Falconf8136142018-01-29 13:45:05 -06004873 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004874 break;
4875 case ERROR_INDICATION:
4876 netdev_dbg(netdev, "Got Error Indication\n");
4877 handle_error_indication(crq, adapter);
4878 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004879 case REQUEST_STATISTICS_RSP:
4880 netdev_dbg(netdev, "Got Statistics Response\n");
4881 complete(&adapter->stats_done);
4882 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004883 case QUERY_IP_OFFLOAD_RSP:
4884 netdev_dbg(netdev, "Got Query IP offload Response\n");
4885 handle_query_ip_offload_rsp(adapter);
4886 break;
4887 case MULTICAST_CTRL_RSP:
4888 netdev_dbg(netdev, "Got multicast control Response\n");
4889 break;
4890 case CONTROL_IP_OFFLOAD_RSP:
4891 netdev_dbg(netdev, "Got Control IP offload Response\n");
4892 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4893 sizeof(adapter->ip_offload_ctrl),
4894 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05004895 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004896 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004897 case COLLECT_FW_TRACE_RSP:
4898 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4899 complete(&adapter->fw_done);
4900 break;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004901 case GET_VPD_SIZE_RSP:
4902 handle_vpd_size_rsp(crq, adapter);
4903 break;
4904 case GET_VPD_RSP:
4905 handle_vpd_rsp(crq, adapter);
4906 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004907 case QUERY_PHYS_PARMS_RSP:
4908 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4909 complete(&adapter->fw_done);
4910 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004911 default:
4912 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4913 gen_crq->cmd);
4914 }
4915}
4916
4917static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4918{
4919 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06004920
Thomas Falcon6c267b32017-02-15 12:17:58 -06004921 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004922 return IRQ_HANDLED;
4923}
4924
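/* Tasklet scheduled from ibmvnic_interrupt(): drain the CRQ under the
 * queue lock, handing each valid message to ibmvnic_handle_crq().
 */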
Allen Paisaa7c3fe2020-09-14 12:59:29 +05304925static void ibmvnic_tasklet(struct tasklet_struct *t)
Thomas Falcon6c267b32017-02-15 12:17:58 -06004926{
Allen Paisaa7c3fe2020-09-14 12:59:29 +05304927 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004928 struct ibmvnic_crq_queue *queue = &adapter->crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004929 union ibmvnic_crq *crq;
4930 unsigned long flags;
4931 bool done = false;
4932
4933 spin_lock_irqsave(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004934 while (!done) {
4935 /* Pull all the valid messages off the CRQ */
4936 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4937 ibmvnic_handle_crq(crq, adapter);
4938 crq->generic.first = 0;
4939 }
Brian Kinged7ecbf2017-04-19 13:44:53 -04004940
4941 /* remain in tasklet until all
4942 * capabilities responses are received
4943 */
4944 if (!adapter->wait_capability)
4945 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004946 }
Thomas Falcon249168a2017-02-15 12:18:00 -06004947	/* if capability CRQs were sent in this tasklet, the next
4948	 * tasklet run must wait until all responses are received
4949 */
4950 if (atomic_read(&adapter->running_cap_crqs) != 0)
4951 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004952 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004953}
4954
4955static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4956{
4957 struct vio_dev *vdev = adapter->vdev;
4958 int rc;
4959
4960 do {
4961 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4962 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4963
4964 if (rc)
4965 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4966
4967 return rc;
4968}
4969
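/* Free, clear and re-register the main CRQ, e.g. after a transport
 * event. H_CLOSED from H_REG_CRQ only means the partner side is not
 * ready yet.
 */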
4970static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4971{
4972 struct ibmvnic_crq_queue *crq = &adapter->crq;
4973 struct device *dev = &adapter->vdev->dev;
4974 struct vio_dev *vdev = adapter->vdev;
4975 int rc;
4976
4977 /* Close the CRQ */
4978 do {
4979 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4980 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4981
4982 /* Clean out the queue */
Lijun Pan0e435be2020-11-23 13:35:46 -06004983 if (!crq->msgs)
4984 return -EINVAL;
4985
Thomas Falcon032c5e82015-12-21 11:26:06 -06004986 memset(crq->msgs, 0, PAGE_SIZE);
4987 crq->cur = 0;
Thomas Falcon51536982018-05-23 13:37:56 -05004988 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004989
4990 /* And re-open it again */
4991 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4992 crq->msg_token, PAGE_SIZE);
4993
4994 if (rc == H_CLOSED)
4995 /* Adapter is good, but other end is not ready */
4996 dev_warn(dev, "Partner adapter not ready\n");
4997 else if (rc != 0)
4998 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4999
5000 return rc;
5001}
5002
Nathan Fontenotf9928872017-03-30 02:48:54 -04005003static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005004{
5005 struct ibmvnic_crq_queue *crq = &adapter->crq;
5006 struct vio_dev *vdev = adapter->vdev;
5007 long rc;
5008
Nathan Fontenotf9928872017-03-30 02:48:54 -04005009 if (!crq->msgs)
5010 return;
5011
Thomas Falcon032c5e82015-12-21 11:26:06 -06005012 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5013 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005014 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005015 do {
5016 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5017 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5018
5019 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5020 DMA_BIDIRECTIONAL);
5021 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005022 crq->msgs = NULL;
Thomas Falcon51536982018-05-23 13:37:56 -05005023 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005024}
5025
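/* Allocate and register the main CRQ: a single DMA-mapped page of
 * entries plus the interrupt and tasklet that service it. H_RESOURCE
 * likely means the page is still registered from a kexec, so retry
 * via ibmvnic_reset_crq().
 */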
Nathan Fontenotf9928872017-03-30 02:48:54 -04005026static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005027{
5028 struct ibmvnic_crq_queue *crq = &adapter->crq;
5029 struct device *dev = &adapter->vdev->dev;
5030 struct vio_dev *vdev = adapter->vdev;
5031 int rc, retrc = -ENOMEM;
5032
Nathan Fontenotf9928872017-03-30 02:48:54 -04005033 if (crq->msgs)
5034 return 0;
5035
Thomas Falcon032c5e82015-12-21 11:26:06 -06005036 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5037 /* Should we allocate more than one page? */
5038
5039 if (!crq->msgs)
5040 return -ENOMEM;
5041
5042 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5043 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5044 DMA_BIDIRECTIONAL);
5045 if (dma_mapping_error(dev, crq->msg_token))
5046 goto map_failed;
5047
5048 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5049 crq->msg_token, PAGE_SIZE);
5050
5051 if (rc == H_RESOURCE)
5052 /* maybe kexecing and resource is busy. try a reset */
5053 rc = ibmvnic_reset_crq(adapter);
5054 retrc = rc;
5055
5056 if (rc == H_CLOSED) {
5057 dev_warn(dev, "Partner adapter not ready\n");
5058 } else if (rc) {
5059 dev_warn(dev, "Error %d opening adapter\n", rc);
5060 goto reg_crq_failed;
5061 }
5062
5063 retrc = 0;
5064
Allen Paisaa7c3fe2020-09-14 12:59:29 +05305065	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005066
Thomas Falcon032c5e82015-12-21 11:26:06 -06005067 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03005068 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5069 adapter->vdev->unit_address);
5070 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005071 if (rc) {
5072 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5073 vdev->irq, rc);
5074 goto req_irq_failed;
5075 }
5076
5077 rc = vio_enable_interrupts(vdev);
5078 if (rc) {
5079 dev_err(dev, "Error %d enabling interrupts\n", rc);
5080 goto req_irq_failed;
5081 }
5082
5083 crq->cur = 0;
5084 spin_lock_init(&crq->lock);
5085
5086 return retrc;
5087
5088req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06005089 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005090 do {
5091 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5092 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5093reg_crq_failed:
5094 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5095map_failed:
5096 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005097 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005098 return retrc;
5099}
5100
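/* Drive the CRQ initialization handshake with the VNIC server and
 * (re)build the sub-CRQs. On a reset, the existing sub-CRQs are reused
 * unless the negotiated queue counts changed.
 */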
Lijun Pan635e4422020-08-19 17:52:26 -05005101static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
John Allenf6ef6402017-03-17 17:13:42 -05005102{
5103 struct device *dev = &adapter->vdev->dev;
5104 unsigned long timeout = msecs_to_jiffies(30000);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005105 u64 old_num_rx_queues, old_num_tx_queues;
John Allenf6ef6402017-03-17 17:13:42 -05005106 int rc;
5107
John Allen017892c12017-05-26 10:30:19 -04005108 adapter->from_passive_init = false;
5109
Lijun Pan635e4422020-08-19 17:52:26 -05005110 if (reset) {
5111 old_num_rx_queues = adapter->req_rx_queues;
5112 old_num_tx_queues = adapter->req_tx_queues;
5113 reinit_completion(&adapter->init_done);
5114 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005115
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005116 adapter->init_done_rc = 0;
Lijun Panfa68bfa2020-08-19 17:52:24 -05005117 rc = ibmvnic_send_crq_init(adapter);
5118 if (rc) {
5119 dev_err(dev, "Send crq init failed with error %d\n", rc);
5120 return rc;
5121 }
5122
John Allenf6ef6402017-03-17 17:13:42 -05005123 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5124 dev_err(dev, "Initialization sequence timed out\n");
John Allen017892c12017-05-26 10:30:19 -04005125 return -1;
5126 }
5127
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005128 if (adapter->init_done_rc) {
5129 release_crq_queue(adapter);
5130 return adapter->init_done_rc;
5131 }
5132
Lijun Pan785a2b12020-09-17 21:12:46 -05005133 if (adapter->from_passive_init) {
5134 adapter->state = VNIC_OPEN;
5135 adapter->from_passive_init = false;
5136 return -1;
5137 }
5138
Lijun Pan635e4422020-08-19 17:52:26 -05005139 if (reset &&
5140 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05005141 adapter->reset_reason != VNIC_RESET_MOBILITY) {
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005142 if (adapter->req_rx_queues != old_num_rx_queues ||
5143 adapter->req_tx_queues != old_num_tx_queues) {
5144 release_sub_crqs(adapter, 0);
5145 rc = init_sub_crqs(adapter);
5146 } else {
5147 rc = reset_sub_crq_queues(adapter);
5148 }
5149 } else {
Nathan Fontenot57a49432017-05-26 10:31:12 -04005150 rc = init_sub_crqs(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005151 }
5152
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005153 if (rc) {
5154 dev_err(dev, "Initialization of sub crqs failed\n");
5155 release_crq_queue(adapter);
Thomas Falcon5df969c2017-06-28 19:55:54 -05005156 return rc;
5157 }
5158
5159 rc = init_sub_crq_irqs(adapter);
5160 if (rc) {
5161 dev_err(dev, "Failed to initialize sub crq irqs\n");
5162 release_crq_queue(adapter);
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005163 }
5164
5165 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05005166}
5167
Thomas Falcon40c9db82017-06-12 12:35:04 -05005168static struct device_attribute dev_attr_failover;
5169
Thomas Falcon032c5e82015-12-21 11:26:06 -06005170static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5171{
5172 struct ibmvnic_adapter *adapter;
5173 struct net_device *netdev;
5174 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005175 int rc;
5176
5177 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5178 dev->unit_address);
5179
5180 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5181 VETH_MAC_ADDR, NULL);
5182 if (!mac_addr_p) {
5183 dev_err(&dev->dev,
5184 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5185 __FILE__, __LINE__);
5186 return 0;
5187 }
5188
5189 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
Thomas Falcond45cc3a2017-12-18 12:52:11 -06005190 IBMVNIC_MAX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005191 if (!netdev)
5192 return -ENOMEM;
5193
5194 adapter = netdev_priv(netdev);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005195 adapter->state = VNIC_PROBING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005196 dev_set_drvdata(&dev->dev, netdev);
5197 adapter->vdev = dev;
5198 adapter->netdev = netdev;
5199
5200 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5201 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5202 netdev->irq = dev->irq;
5203 netdev->netdev_ops = &ibmvnic_netdev_ops;
5204 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5205 SET_NETDEV_DEV(netdev, &dev->dev);
5206
5207 spin_lock_init(&adapter->stats_lock);
5208
Nathan Fontenoted651a12017-05-03 14:04:38 -04005209 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005210 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5211 __ibmvnic_delayed_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005212 INIT_LIST_HEAD(&adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06005213 spin_lock_init(&adapter->rwi_lock);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005214 spin_lock_init(&adapter->state_lock);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005215 mutex_init(&adapter->fw_lock);
Thomas Falconbbd669a2019-04-04 18:58:26 -05005216 init_completion(&adapter->init_done);
Thomas Falcon070eca92019-11-25 17:12:53 -06005217 init_completion(&adapter->fw_done);
5218 init_completion(&adapter->reset_done);
5219 init_completion(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005220 clear_bit(0, &adapter->resetting);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005221
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005222 do {
Nathan Fontenot30f79622018-04-06 18:37:06 -05005223 rc = init_crq_queue(adapter);
5224 if (rc) {
5225 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5226 rc);
5227 goto ibmvnic_init_fail;
5228 }
5229
Lijun Pan635e4422020-08-19 17:52:26 -05005230 rc = ibmvnic_reset_init(adapter, false);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005231 if (rc && rc != EAGAIN)
5232 goto ibmvnic_init_fail;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005233 } while (rc == EAGAIN);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005234
Thomas Falcon07184212018-05-16 15:49:05 -05005235 rc = init_stats_buffers(adapter);
5236 if (rc)
5237 goto ibmvnic_init_fail;
5238
5239 rc = init_stats_token(adapter);
5240 if (rc)
5241 goto ibmvnic_stats_fail;
5242
Thomas Falconf39f0d12017-02-14 10:22:59 -06005243 netdev->mtu = adapter->req_mtu - ETH_HLEN;
John Allenc26eba02017-10-26 16:23:25 -05005244 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5245 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005246
Thomas Falcon40c9db82017-06-12 12:35:04 -05005247 rc = device_create_file(&dev->dev, &dev_attr_failover);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005248 if (rc)
Thomas Falcon07184212018-05-16 15:49:05 -05005249 goto ibmvnic_dev_file_err;
Thomas Falcon40c9db82017-06-12 12:35:04 -05005250
Mick Tarsele876a8a2017-09-28 13:53:18 -07005251 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005252 rc = register_netdev(netdev);
5253 if (rc) {
5254 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005255 goto ibmvnic_register_fail;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005256 }
5257 dev_info(&dev->dev, "ibmvnic registered\n");
5258
Nathan Fontenot90c80142017-05-03 14:04:32 -04005259 adapter->state = VNIC_PROBED;
John Allenc26eba02017-10-26 16:23:25 -05005260
5261 adapter->wait_for_reset = false;
5262
Thomas Falcon032c5e82015-12-21 11:26:06 -06005263 return 0;
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005264
ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

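/* Tear down in roughly the reverse order of ibmvnic_probe(). Removal is
 * refused with -EBUSY while a reset is in flight so the reset workers
 * never race with the teardown below.
 */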
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);
	if (test_bit(0, &adapter->resetting)) {
		spin_unlock_irqrestore(&adapter->state_lock, flags);
		return -EBUSY;
	}

	adapter->state = VNIC_REMOVING;
	spin_unlock_irqrestore(&adapter->state_lock, flags);

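	/* No new resets are scheduled once the state is VNIC_REMOVING;
	 * wait out any that are already queued or running.
	 */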
	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

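/* sysfs "failover" attribute: writing "1" asks the hypervisor to fail the
 * client over to a backup path. The current session token is fetched with
 * H_GET_SESSION_TOKEN and reported back via H_SESSION_ERR_DETECTED, which
 * triggers the failover. Typical use from userspace (path illustrative):
 *   echo 1 > /sys/devices/vio/<unit-address>/failover
 */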
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

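/* Report the IOMMU entitlement this device wants: one page for the CRQ,
 * the statistics buffer, four pages per sub-CRQ, and every long-term
 * mapped buffer in the active rx pools.
 */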
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

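/* Partition resume: if the device was open, kick the CRQ tasklet so any
 * messages that arrived while suspended are processed.
 */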
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

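/* Match virtual NICs presented to the partition by the hypervisor with a
 * "network" device type and an "IBM,vnic" compatibility string.
 */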
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);