// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

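/* Each entry in ibmvnic_stats below names one counter in the
 * device-written statistics block and records its byte offset within
 * struct ibmvnic_adapter. IBMVNIC_STAT_OFF() computes that offset from
 * the field name in struct ibmvnic_statistics, and IBMVNIC_GET_STAT()
 * reads the u64 at a given offset, e.g.
 * IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset).
 */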
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

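/**
 * h_reg_sub_crq - Register a sub-CRQ with the hypervisor
 * @unit_address: VIO unit address of the device
 * @token: DMA token for the queue buffer
 * @length: length of the queue in bytes
 * @number: on success, the hypervisor-assigned sub-CRQ number
 * @irq: on success, the interrupt source assigned to the queue
 *
 * Thin wrapper around the H_REG_SUB_CRQ hcall; the queue number and
 * irq are pulled out of the hcall return buffer for the caller.
 */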
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

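/**
 * alloc_long_term_buff - Allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: size of the buffer in bytes
 *
 * Allocates a coherent DMA buffer, assigns it the adapter's next map id
 * and sends a map request so the VNIC server can use it. Waits under
 * fw_lock for the firmware response and frees the buffer again if the
 * request fails, times out or is rejected by firmware.
 */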
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

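/**
 * free_long_term_buff - Release a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer to free
 *
 * Sends an unmap request for the buffer (skipped for failover and
 * mobility resets) and frees the coherent DMA memory.
 */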
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

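/**
 * reset_long_term_buff - Re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Zeroes the buffer and sends a fresh map request for it. If firmware
 * rejects the request, falls back to freeing the buffer and allocating
 * a new one of the same size.
 */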
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

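/**
 * replenish_rx_pool - Refill an rx pool with receive buffers
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * For each free slot, allocates an skb, points it at a slice of the
 * pool's long term buffer and posts an rx_add descriptor on the rx
 * sub-CRQ. On an hcall failure the slot is returned to the free map,
 * and if the queue is closed or a failover is pending, the rx pools
 * are deactivated and the carrier is reported off until the expected
 * reset is triggered.
 */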
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle, &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

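/**
 * reset_rx_pools - Return all rx pools to their initial state
 * @adapter: private device data
 *
 * Re-registers each pool's long term buffer, reallocating it when the
 * required rx buffer size has changed, then resets the free map,
 * buffer tracking and counters so the pool can be refilled from
 * scratch after a device reset.
 */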
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = buff_size;
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

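/**
 * init_rx_pools - Allocate rx pools, one per active rx sub-CRQ
 * @netdev: net device
 *
 * Sizes each pool by the negotiated req_rx_add_entries_per_subcrq and
 * the current rx buffer size, allocating the free map, the rx buffer
 * tracking array and the long term DMA buffer. All pools are released
 * again if any allocation fails.
 */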
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = buff_size;
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

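/**
 * init_tx_pools - Allocate tx and TSO pools, one pair per tx sub-CRQ
 * @netdev: net device
 *
 * For each active tx sub-CRQ, sets up a regular tx pool holding
 * req_tx_entries_per_subcrq buffers of MTU-plus-VLAN-header size and a
 * companion TSO pool of IBMVNIC_TSO_BUFS buffers of IBMVNIC_TSO_BUF_SZ
 * bytes each. All pools are released if any allocation fails.
 */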
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

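/**
 * ibmvnic_login - Log in to the VNIC server
 * @netdev: net device
 *
 * Sends a login request and waits for the response, allowing up to ten
 * retries. An aborted login is retried after a short sleep. A partial
 * success means capabilities must be renegotiated, so the sub-CRQs are
 * released, capabilities re-queried and the sub-CRQs and their irqs
 * reinitialized before the login is retried. On success the MAC
 * address is (re)programmed.
 */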
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

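/**
 * set_link_state - Set the adapter's logical link state
 * @adapter: private device data
 * @link_state: IBMVNIC_LOGICAL_LNK_UP or IBMVNIC_LOGICAL_LNK_DN
 *
 * Sends a LOGICAL_LINK_STATE CRQ and waits for the response. A partial
 * success is re-sent after a one second delay; any other nonzero
 * completion code is returned to the caller.
 */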
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

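/**
 * ibmvnic_get_vpd - Retrieve Vital Product Data from firmware
 * @adapter: private device data
 *
 * Queries the VPD size with GET_VPD_SIZE, (re)allocates and DMA maps a
 * buffer of that size, then issues GET_VPD to have firmware fill it
 * in. Both steps wait for their firmware completions under fw_lock.
 */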
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

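/**
 * __ibmvnic_open - Bring the interface up
 * @netdev: net device
 *
 * Replenishes the rx pools, enables napi and the tx/rx sub-CRQ
 * interrupts, sets the logical link state to up and starts the tx
 * queues. When reopening from the closed state, the sub-CRQ irqs are
 * re-enabled and napi is scheduled on each rx queue.
 */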
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

1326static int __ibmvnic_close(struct net_device *netdev)
1327{
1328 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1329 int rc = 0;
1330
1331 adapter->state = VNIC_CLOSING;
1332 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1333 if (rc)
1334 return rc;
Nathan Fontenot90c80142017-05-03 14:04:32 -04001335 adapter->state = VNIC_CLOSED;
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001336 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001337}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to be filled in
 * @hdr_data: buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 *
 * Return: total length of the header data placed in @hdr_data.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

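	/* hdr_field is a bitfield: bit 6 requests the L2 header, bit 5
	 * the L3 header and bit 4 the L4 header. Copy each requested
	 * header into hdr_data back to back.
	 */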
	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr.
 *
 * Return: the number of descriptors placed in @scrq_arr.
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

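	/* The first descriptor carries the header lengths plus up to 24
	 * bytes of header data; each header extension descriptor after
	 * it carries up to 29 more bytes.
	 */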
	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer containing the socket buffer and descriptor array
 * @num_entries: number of descriptors to be sent, incremented by the
 *		 number of header descriptors added
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

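	/* Header descriptors are built at indir_arr[1] onwards;
	 * indir_arr[0] is reserved for the main TX descriptor that
	 * ibmvnic_xmit fills in.
	 */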
	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length.
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;
	u64 handle;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle = tx_scrq->handle;

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

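	/* Each buffer index owns a fixed slot in the pool's long-term
	 * DMA-mapped buffer, so the skb data is copied into that slot
	 * rather than being mapped on the fly.
	 */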
	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

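	/* The correlator is echoed back in the TX completion; the
	 * IBMVNIC_TSO_POOL_MASK bit records which pool (TSO or
	 * standard) the buffer index refers to.
	 */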
	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto tx_err_out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle,
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	} else {
		tx_buff->num_entries = num_entries;
		lpar_rc = send_subcrq(adapter, handle,
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
			dev_err_ratelimited(dev, "tx: send failed\n");
		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
			/* Disable TX and report carrier off if queue is closed
			 * or pending failover.
			 * Firmware guarantees that a signal will be sent to the
			 * driver, triggering a reset or some other action.
			 */
			netif_tx_stop_all_queues(netdev);
			netif_carrier_off(netdev);
		}

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto tx_err_out;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_err_out:
	/* roll back consumer index and map array */
	if (tx_pool->consumer_index == 0)
		tx_pool->consumer_index =
			tx_pool->num_buffers - 1;
	else
		tx_pool->consumer_index--;
	tx_pool->free_map[tx_pool->consumer_index] = index;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

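	/* fw_lock serializes firmware commands; hold it across the CRQ
	 * send and the wait so the fw_done completion pairs with this
	 * request.
	 */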
	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (adapter->state != VNIC_PROBED) {
		ether_addr_copy(adapter->mac_addr, addr->sa_data);
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
	}

	return rc;
}

/* do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_rwi *rwi,
				 u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			return rc;
	}

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, true);
	if (rc)
		return IBMVNIC_INIT_FAILED;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = reset_state;
		return rc;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc)
		return IBMVNIC_OPEN_FAILED;

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	return 0;
}

/* do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
		   rwi->reset_reason);

	rtnl_lock();
	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		adapter->state = VNIC_CLOSING;

		/* Release the RTNL lock before link state change and
		 * re-acquire after the link state change to allow
		 * linkwatch_event to grab the RTNL lock and run during
		 * a reset.
		 */
		rtnl_unlock();
		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
		rtnl_lock();
		if (rc)
			goto out;

		if (adapter->state != VNIC_CLOSING) {
			rc = -1;
			goto out;
		}

		adapter->state = VNIC_CLOSED;
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc) {
			adapter->state = reset_state;
			goto out;
		}

		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason != VNIC_RESET_FAILOVER) {
		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
		call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
	}

	rc = 0;

out:
	rtnl_unlock();

	return rc;
}

static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		return rc;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc)
		return IBMVNIC_OPEN_FAILED;

	return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	bool saved_state = false;
	unsigned long flags;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

	if (test_and_set_bit_lock(0, &adapter->resetting)) {
		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
				      IBMVNIC_RESET_DELAY);
		return;
	}

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

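		/* Capture the pre-reset adapter state once so that every
		 * reset processed in this batch restores to the same
		 * state.
		 */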
		if (!saved_state) {
			reset_state = adapter->state;
			adapter->state = VNIC_RESETTING;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			/* CHANGE_PARAM requestor holds rtnl_lock */
			rc = do_change_param_reset(adapter, rwi, reset_state);
		} else if (adapter->force_reset_recovery) {
			/* Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
			adapter->failover_pending = false;

			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
			     adapter->from_passive_init)) {
			rc = do_reset(adapter, rwi, reset_state);
		}
		kfree(rwi);
		if (rc == IBMVNIC_OPEN_FAILED) {
			if (list_empty(&adapter->rwi_list))
				adapter->state = VNIC_CLOSED;
			else
				adapter->state = reset_state;
			rc = 0;
		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
			   !adapter->force_reset_recovery)
			break;

		rwi = get_next_rwi(adapter);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	if (rc) {
		netdev_dbg(adapter->netdev, "Reset failed\n");
		free_all_rwi(adapter);
	}

	clear_bit_unlock(0, &adapter->resetting);
}

static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct list_head *entry, *tmp_entry;
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;
	int ret;

	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
	 */
	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	if (adapter->state == VNIC_PROBING) {
		netdev_warn(netdev, "Adapter reset during probe\n");
		ret = adapter->init_done_rc = EAGAIN;
		goto err;
	}

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset\n");
			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
		ibmvnic_close(netdev);
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
			list_del(entry);
	}
	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
	schedule_work(&adapter->ibmvnic_reset);

	return 0;
err:
	return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

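	/* Budget not exhausted: re-enable the interrupt and complete
	 * NAPI, then re-check for work that raced in; if any is pending
	 * and we can reschedule, disable the interrupt and keep polling.
	 */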
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

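	/* Snapshot the current settings so they can be restored if the
	 * reset with the requested parameters fails.
	 */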
	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);

	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

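	/* desired.mtu is carried L2-inclusive, hence the ETH_HLEN
	 * adjustment here.
	 */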
	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters cannot handle packets
	 * with a MSS less than 224 or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	} else {
		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
	}
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = 0;
	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
		netdev_info(netdev,
			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    ring->rx_pending, ring->tx_pending,
			    adapter->req_rx_add_entries_per_subcrq,
			    adapter->req_tx_entries_per_subcrq);
	return ret;
}

John Allenc2dbeb62017-08-02 16:47:17 -05002659static void ibmvnic_get_channels(struct net_device *netdev,
2660 struct ethtool_channels *channels)
2661{
2662 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2663
Thomas Falcon723ad912018-09-28 18:38:26 -05002664 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2665 channels->max_rx = adapter->max_rx_queues;
2666 channels->max_tx = adapter->max_tx_queues;
2667 } else {
2668 channels->max_rx = IBMVNIC_MAX_QUEUES;
2669 channels->max_tx = IBMVNIC_MAX_QUEUES;
2670 }
2671
John Allenc2dbeb62017-08-02 16:47:17 -05002672 channels->max_other = 0;
2673 channels->max_combined = 0;
2674 channels->rx_count = adapter->req_rx_queues;
2675 channels->tx_count = adapter->req_tx_queues;
2676 channels->other_count = 0;
2677 channels->combined_count = 0;
2678}
2679
John Allenc26eba02017-10-26 16:23:25 -05002680static int ibmvnic_set_channels(struct net_device *netdev,
2681 struct ethtool_channels *channels)
2682{
2683 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002684 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002685
Thomas Falcon723ad912018-09-28 18:38:26 -05002686 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002687 adapter->desired.rx_queues = channels->rx_count;
2688 adapter->desired.tx_queues = channels->tx_count;
2689
Thomas Falcon723ad912018-09-28 18:38:26 -05002690 ret = wait_for_reset(adapter);
2691
2692 if (!ret &&
2693 (adapter->req_rx_queues != channels->rx_count ||
2694 adapter->req_tx_queues != channels->tx_count))
2695 netdev_info(netdev,
2696 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2697 channels->rx_count, channels->tx_count,
2698 adapter->req_rx_queues, adapter->req_tx_queues);
2699 return ret;
2700
John Allenc26eba02017-10-26 16:23:25 -05002701}
2702
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
		     i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	default:
		return;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}

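/* The global counters are not kept locally; they are fetched from the
 * VNIC server on demand.  A REQUEST_STATISTICS CRQ asks the server to
 * DMA the counters into the buffer behind adapter->stats_token, and we
 * block on stats_done (up to 10 seconds) until the response handler
 * signals completion.  Per-queue counters are then appended from the
 * driver's own tx/rx stats buffers.
 */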
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
		cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}

static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
	.get_priv_flags		= ibmvnic_get_priv_flags,
	.set_priv_flags		= ibmvnic_set_priv_flags,
};

/* Routines for managing CRQs/sCRQs */

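/* Reset a single sub-CRQ in place: drop its IRQ mapping, zero the
 * message area, and re-register the queue with the hypervisor.  The
 * 4-page message buffer and its DMA mapping are reused across the
 * reset, so only the hypervisor-side registration is redone.
 */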
static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	atomic_set(&scrq->used, 0);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (test_bit(0, &adapter->resetting) &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = (0xff000000) | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI would fail with rc = H_FUNCTION when running
		 * in XIVE mode which is expected, but not an error.
		 */
		if (rc && (rc != H_FUNCTION))
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

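/* Drain TX completions from a sub-CRQ.  Each completion carries a
 * correlator that indexes either the regular or the TSO buffer pool
 * (distinguished by IBMVNIC_TSO_POOL_MASK); freed descriptors are
 * returned to that pool's free map.  The subqueue is restarted once
 * the in-flight entry count drops to half the negotiated ring size or
 * below.  Interrupts are re-enabled before a final pending check so
 * completions that race with the enable are not lost.
 */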
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			num_entries += txbuff->num_entries;

			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

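/* Wire up one Linux interrupt per sub-CRQ: map the hypervisor-assigned
 * hw_irq into the kernel's IRQ domain with irq_create_mapping(), give
 * it a per-queue name ("ibmvnic-<unit>-tx<n>" / "-rx<n>"), and attach
 * the TX or RX handler.  On any failure, every previously requested
 * IRQ is unwound before the sub-CRQs themselves are released.
 */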
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, scrq->name, scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}

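/* Allocate the TX and RX sub-CRQs in one pass.  If fewer queues could
 * be registered than were requested (but at least the negotiated
 * minimums), the shortfall is spread across the TX and RX requests
 * before the surviving queues are split into the tx_scrq and rx_scrq
 * arrays.
 */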
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}

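/* Request the capability values the driver wants to run with.  On the
 * first call (!retry) the requested queue and entry counts are derived
 * from the server's advertised maxima, any user-supplied "desired"
 * overrides, and the long term buffer size limit; retries reuse the
 * values already computed.  Each REQUEST_CAPABILITY CRQ increments
 * running_cap_crqs so the response handler can tell when the whole
 * batch has been answered.
 */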
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

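/* Sub-CRQ entries are consumed ring-buffer style: an entry is valid
 * once the IBMVNIC_CRQ_CMD_RSP bit is set in its first byte, and the
 * consumer clears that byte after processing (see ibmvnic_complete_tx()
 * for the TX side) so the slot reads as empty on the next pass.
 */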
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			/* do not reset, report the fail, wait for passive init from server */
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

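/* H_CLOSED here usually means the partner side has not opened its end
 * of the CRQ yet, so keep retrying for up to ~5 seconds (100 tries at
 * 50 ms) before giving up on the init handshake.
 */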
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int retries = 100;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	do {
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc != H_CLOSED)
			break;
		retries--;
		msleep(50);

	} while (retries > 0);

	if (rc) {
		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
		return rc;
	}

	return 0;
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

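/* Client data is passed to the server inside the login buffer as a
 * packed type/length/value list: type 1 is the OS name ("Linux"),
 * type 2 the LPAR nodename, type 3 the netdev name, followed by a
 * zeroed terminating entry.  Lengths include the NUL terminator.
 */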
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}

static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}

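/* Build and send the LOGIN request.  The DMA-mapped login buffer is
 * laid out as a fixed header, an array of TX sub-CRQ handles, an array
 * of RX sub-CRQ handles, then the client data TLVs added by
 * vnic_add_client_data().  A response buffer is mapped up front so the
 * server can DMA the login response back to us.
 */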
static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
		sizeof(struct ibmvnic_login_buffer) +
		sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
		client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
		cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
		cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			    sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] =
				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] =
				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
		cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}

static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}

static void send_query_map(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

3873/* Send a series of CRQs requesting various capabilities of the VNIC server */
Lijun Pan491099a2020-09-27 20:13:26 -05003874static void send_query_cap(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003875{
3876 union ibmvnic_crq crq;
3877
Thomas Falcon901e0402017-02-15 12:17:59 -06003878 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003879 memset(&crq, 0, sizeof(crq));
3880 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3881 crq.query_capability.cmd = QUERY_CAPABILITY;
3882
3883 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003884 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003885 ibmvnic_send_crq(adapter, &crq);
3886
3887 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003888 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003889 ibmvnic_send_crq(adapter, &crq);
3890
3891 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003892 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003893 ibmvnic_send_crq(adapter, &crq);
3894
3895 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003896 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003897 ibmvnic_send_crq(adapter, &crq);
3898
3899 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003900 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003901 ibmvnic_send_crq(adapter, &crq);
3902
3903 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003904 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003905 ibmvnic_send_crq(adapter, &crq);
3906
3907 crq.query_capability.capability =
3908 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003909 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003910 ibmvnic_send_crq(adapter, &crq);
3911
3912 crq.query_capability.capability =
3913 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003914 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003915 ibmvnic_send_crq(adapter, &crq);
3916
3917 crq.query_capability.capability =
3918 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003919 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003920 ibmvnic_send_crq(adapter, &crq);
3921
3922 crq.query_capability.capability =
3923 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003924 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003925 ibmvnic_send_crq(adapter, &crq);
3926
3927 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06003928 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003929 ibmvnic_send_crq(adapter, &crq);
3930
3931 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003932 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003933 ibmvnic_send_crq(adapter, &crq);
3934
3935 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003936 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003937 ibmvnic_send_crq(adapter, &crq);
3938
3939 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003940 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003941 ibmvnic_send_crq(adapter, &crq);
3942
3943 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06003944 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003945 ibmvnic_send_crq(adapter, &crq);
3946
3947 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06003948 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003949 ibmvnic_send_crq(adapter, &crq);
3950
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04003951 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3952 atomic_inc(&adapter->running_cap_crqs);
3953 ibmvnic_send_crq(adapter, &crq);
3954
Thomas Falcon032c5e82015-12-21 11:26:06 -06003955 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003956 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003957 ibmvnic_send_crq(adapter, &crq);
3958
3959 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003960 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003961 ibmvnic_send_crq(adapter, &crq);
3962
3963 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003964 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003965 ibmvnic_send_crq(adapter, &crq);
3966
3967 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003968 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003969 ibmvnic_send_crq(adapter, &crq);
3970
3971 crq.query_capability.capability =
3972 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06003973 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003974 ibmvnic_send_crq(adapter, &crq);
3975
3976 crq.query_capability.capability =
3977 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003978 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003979 ibmvnic_send_crq(adapter, &crq);
3980
3981 crq.query_capability.capability =
3982 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003983 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003984 ibmvnic_send_crq(adapter, &crq);
3985
3986 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003987 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003988 ibmvnic_send_crq(adapter, &crq);
3989}
3990
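/* Ask the server which IP checksum and segmentation offloads it supports.
 * The reply data is DMA-written into adapter->ip_offload_buf (mapped here)
 * and parsed in handle_query_ip_offload_rsp().
 */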
Lijun Pan16e811f2020-09-27 20:13:29 -05003991static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
3992{
3993 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3994 struct device *dev = &adapter->vdev->dev;
3995 union ibmvnic_crq crq;
3996
3997 adapter->ip_offload_tok =
3998 dma_map_single(dev,
3999 &adapter->ip_offload_buf,
4000 buf_sz,
4001 DMA_FROM_DEVICE);
4002
4003 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4004 if (!firmware_has_feature(FW_FEATURE_CMO))
4005 dev_err(dev, "Couldn't map offload buffer\n");
4006 return;
4007 }
4008
4009 memset(&crq, 0, sizeof(crq));
4010 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4011 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4012 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4013 crq.query_ip_offload.ioba =
4014 cpu_to_be32(adapter->ip_offload_tok);
4015
4016 ibmvnic_send_crq(adapter, &crq);
4017}
4018
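/* Tell the server which of the offloads it advertised we actually want,
 * and mirror that choice into netdev->hw_features. After probe time,
 * features the device no longer offers are cleared and previously wanted
 * ones that became available again are re-enabled.
 */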
Lijun Pan46899bd2020-09-27 20:13:30 -05004019static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4020{
4021 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4022 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4023 struct device *dev = &adapter->vdev->dev;
4024 netdev_features_t old_hw_features = 0;
4025 union ibmvnic_crq crq;
4026
4027 adapter->ip_offload_ctrl_tok =
4028 dma_map_single(dev,
4029 ctrl_buf,
4030 sizeof(adapter->ip_offload_ctrl),
4031 DMA_TO_DEVICE);
4032
4033 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4034 dev_err(dev, "Couldn't map ip offload control buffer\n");
4035 return;
4036 }
4037
4038 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4039 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4040 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4041 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4042 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4043 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4044 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4045 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4046 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4047 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4048
4049 /* large_rx disabled for now, additional features needed */
4050 ctrl_buf->large_rx_ipv4 = 0;
4051 ctrl_buf->large_rx_ipv6 = 0;
4052
4053 if (adapter->state != VNIC_PROBING) {
4054 old_hw_features = adapter->netdev->hw_features;
4055 adapter->netdev->hw_features = 0;
4056 }
4057
4058 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4059
4060 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4061 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4062
4063 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4064 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4065
4066 if ((adapter->netdev->features &
4067 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4068 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4069
4070 if (buf->large_tx_ipv4)
4071 adapter->netdev->hw_features |= NETIF_F_TSO;
4072 if (buf->large_tx_ipv6)
4073 adapter->netdev->hw_features |= NETIF_F_TSO6;
4074
4075 if (adapter->state == VNIC_PROBING) {
4076 adapter->netdev->features |= adapter->netdev->hw_features;
4077 } else if (old_hw_features != adapter->netdev->hw_features) {
4078 netdev_features_t tmp = 0;
4079
4080 /* disable features no longer supported */
4081 adapter->netdev->features &= adapter->netdev->hw_features;
4082 /* turn on features now supported if previously enabled */
4083 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4084 adapter->netdev->hw_features;
4085 adapter->netdev->features |=
4086 tmp & adapter->netdev->wanted_features;
4087 }
4088
4089 memset(&crq, 0, sizeof(crq));
4090 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4091 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4092 crq.control_ip_offload.len =
4093 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4094 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4095 ibmvnic_send_crq(adapter, &crq);
4096}
4097
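/* Record the Vital Product Data (VPD) length reported by the server and
 * wake the caller waiting on fw_done, which can then allocate a buffer of
 * that size for the subsequent GET_VPD request.
 */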
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004098static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4099 struct ibmvnic_adapter *adapter)
4100{
4101 struct device *dev = &adapter->vdev->dev;
4102
4103 if (crq->get_vpd_size_rsp.rc.code) {
4104 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4105 crq->get_vpd_size_rsp.rc.code);
4106 complete(&adapter->fw_done);
4107 return;
4108 }
4109
4110 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4111 complete(&adapter->fw_done);
4112}
4113
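/* Extract the firmware level from the returned VPD image: the version
 * string follows the ASCII "RM" keyword, with a one-byte length at offset
 * 2 and the string itself starting at offset 3. Falls back to "N/A" when
 * the keyword or a sane length is missing.
 */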
4114static void handle_vpd_rsp(union ibmvnic_crq *crq,
4115 struct ibmvnic_adapter *adapter)
4116{
4117 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004118 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004119 u8 fw_level_len = 0;
4120
4121 memset(adapter->fw_version, 0, 32);
4122
4123 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4124 DMA_FROM_DEVICE);
4125
4126 if (crq->get_vpd_rsp.rc.code) {
4127 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4128 crq->get_vpd_rsp.rc.code);
4129 goto complete;
4130 }
4131
4132 /* get the position of the firmware version info
4133 * located after the ASCII 'RM' substring in the buffer
4134 */
4135 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4136 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02004137 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004138 goto complete;
4139 }
4140
4141 /* get length of firmware level ASCII substring */
4142 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4143 fw_level_len = *(substr + 2);
4144 } else {
4145 dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
4146 goto complete;
4147 }
4148
4149 /* copy firmware version string from vpd into adapter */
4150 if ((substr + 3 + fw_level_len) <
4151 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004152 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004153 } else {
4154 dev_info(dev, "FW substr extrapolated VPD buff\n");
4155 }
4156
4157complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004158 if (adapter->fw_version[0] == '\0')
4159 strncpy((char *)adapter->fw_version, "N/A", 3);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004160 complete(&adapter->fw_done);
4161}
4162
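/* Unmap and (at debug level) dump the offload capabilities returned by
 * the server, then answer with send_control_ip_offload() to enable the
 * subset we can use.
 */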
Thomas Falcon032c5e82015-12-21 11:26:06 -06004163static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4164{
4165 struct device *dev = &adapter->vdev->dev;
4166 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004167 int i;
4168
4169 dma_unmap_single(dev, adapter->ip_offload_tok,
4170 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4171
4172 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4173 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4174 netdev_dbg(adapter->netdev, "%016lx\n",
4175 ((unsigned long *)(buf))[i]);
4176
4177 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4178 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4179 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4180 buf->tcp_ipv4_chksum);
4181 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4182 buf->tcp_ipv6_chksum);
4183 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4184 buf->udp_ipv4_chksum);
4185 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4186 buf->udp_ipv6_chksum);
4187 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4188 buf->large_tx_ipv4);
4189 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4190 buf->large_tx_ipv6);
4191 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4192 buf->large_rx_ipv4);
4193 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4194 buf->large_rx_ipv6);
4195 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4196 buf->max_ipv4_header_size);
4197 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4198 buf->max_ipv6_header_size);
4199 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4200 buf->max_tcp_header_size);
4201 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4202 buf->max_udp_header_size);
4203 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4204 buf->max_large_tx_size);
4205 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4206 buf->max_large_rx_size);
4207 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4208 buf->ipv6_extension_header);
4209 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4210 buf->tcp_pseudosum_req);
4211 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4212 buf->num_ipv6_ext_headers);
4213 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4214 buf->off_ipv6_ext_headers);
4215
Lijun Pan46899bd2020-09-27 20:13:30 -05004216 send_control_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004217}
4218
Thomas Falconc9008d32018-08-06 21:39:59 -05004219static const char *ibmvnic_fw_err_cause(u16 cause)
4220{
4221 switch (cause) {
4222 case ADAPTER_PROBLEM:
4223 return "adapter problem";
4224 case BUS_PROBLEM:
4225 return "bus problem";
4226 case FW_PROBLEM:
4227 return "firmware problem";
4228 case DD_PROBLEM:
4229 return "device driver problem";
4230 case EEH_RECOVERY:
4231 return "EEH recovery";
4232 case FW_UPDATED:
4233 return "firmware updated";
4234 case LOW_MEMORY:
4235 return "low Memory";
4236 default:
4237 return "unknown";
4238 }
4239}
4240
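/* Any ERROR_INDICATION from firmware schedules a reset; the flags field
 * only selects between a fatal and a non-fatal reset reason.
 */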
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004241static void handle_error_indication(union ibmvnic_crq *crq,
4242 struct ibmvnic_adapter *adapter)
4243{
4244 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004245 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004246
Thomas Falconc9008d32018-08-06 21:39:59 -05004247 cause = be16_to_cpu(crq->error_indication.error_cause);
4248
4249 dev_warn_ratelimited(dev,
4250 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4251 crq->error_indication.flags
4252 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4253 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004254
Nathan Fontenoted651a12017-05-03 14:04:38 -04004255 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4256 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004257 else
4258 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004259}
4260
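/* Finish a CHANGE_MAC_ADDR request. The server may return a corrected MAC
 * rather than the one asked for, so both netdev->dev_addr and the adapter
 * copy are taken from the response.
 */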
Thomas Falconf8136142018-01-29 13:45:05 -06004261static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4262 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004263{
4264 struct net_device *netdev = adapter->netdev;
4265 struct device *dev = &adapter->vdev->dev;
4266 long rc;
4267
4268 rc = crq->change_mac_addr_rsp.rc.code;
4269 if (rc) {
4270 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004271 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004272 }
Lijun Pand9b0e592020-10-20 17:39:19 -05004273 /* crq->change_mac_addr.mac_addr is the requested one
4274 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4275 */
Thomas Falcon62740e92019-05-09 23:13:43 -05004276 ether_addr_copy(netdev->dev_addr,
4277 &crq->change_mac_addr_rsp.mac_addr[0]);
Lijun Pand9b0e592020-10-20 17:39:19 -05004278 ether_addr_copy(adapter->mac_addr,
4279 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004280out:
4281 complete(&adapter->fw_done);
4282 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004283}
4284
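/* Process one REQUEST_CAPABILITY response. On PARTIALSUCCESS the value
 * granted by the server (or the fallback MTU) is adopted and the whole
 * request sequence is resent; once every outstanding response has
 * arrived, the IP offload query is issued.
 */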
4285static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4286 struct ibmvnic_adapter *adapter)
4287{
4288 struct device *dev = &adapter->vdev->dev;
4289 u64 *req_value;
4290 char *name;
4291
Thomas Falcon901e0402017-02-15 12:17:59 -06004292 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004293 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4294 case REQ_TX_QUEUES:
4295 req_value = &adapter->req_tx_queues;
4296 name = "tx";
4297 break;
4298 case REQ_RX_QUEUES:
4299 req_value = &adapter->req_rx_queues;
4300 name = "rx";
4301 break;
4302 case REQ_RX_ADD_QUEUES:
4303 req_value = &adapter->req_rx_add_queues;
4304 name = "rx_add";
4305 break;
4306 case REQ_TX_ENTRIES_PER_SUBCRQ:
4307 req_value = &adapter->req_tx_entries_per_subcrq;
4308 name = "tx_entries_per_subcrq";
4309 break;
4310 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4311 req_value = &adapter->req_rx_add_entries_per_subcrq;
4312 name = "rx_add_entries_per_subcrq";
4313 break;
4314 case REQ_MTU:
4315 req_value = &adapter->req_mtu;
4316 name = "mtu";
4317 break;
4318 case PROMISC_REQUESTED:
4319 req_value = &adapter->promisc;
4320 name = "promisc";
4321 break;
4322 default:
4323 dev_err(dev, "Got invalid cap request rsp %d\n",
4324 crq->request_capability.capability);
4325 return;
4326 }
4327
4328 switch (crq->request_capability_rsp.rc.code) {
4329 case SUCCESS:
4330 break;
4331 case PARTIALSUCCESS:
4332 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4333 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06004334 (long)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06004335 number), name);
John Allene7913802018-01-18 16:27:12 -06004336
4337 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4338 REQ_MTU) {
4339 pr_err("mtu of %llu is not supported. Reverting.\n",
4340 *req_value);
4341 *req_value = adapter->fallback.mtu;
4342 } else {
4343 *req_value =
4344 be64_to_cpu(crq->request_capability_rsp.number);
4345 }
4346
Lijun Pan09081b92020-09-27 20:13:27 -05004347 send_request_cap(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004348 return;
4349 default:
4350 dev_err(dev, "Error %d in request cap rsp\n",
4351 crq->request_capability_rsp.rc.code);
4352 return;
4353 }
4354
4355 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004356 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon249168a2017-02-15 12:18:00 -06004357 adapter->wait_capability = false;
Lijun Pan16e811f2020-09-27 20:13:29 -05004358 send_query_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004359 }
4360}
4361
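/* Validate the login response against what was requested, cache the
 * negotiated RX buffer size and the per-queue sub-CRQ handles, then free
 * both login buffers. A non-zero rc (e.g. the server could not grant the
 * requested queues) is passed back through init_done_rc so the caller can
 * retry the login with fewer queues.
 */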
4362static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4363 struct ibmvnic_adapter *adapter)
4364{
4365 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004366 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004367 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4368 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004369 u64 *tx_handle_array;
4370 u64 *rx_handle_array;
4371 int num_tx_pools;
4372 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004373 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004374 int i;
4375
4376 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004377 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004378 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004379 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004380
John Allen498cd8e2016-04-06 11:49:55 -05004381 /* If the number of queues requested can't be allocated by the
4382 * server, the login response will return with code 1. We will need
4383 * to resend the login buffer with fewer queues requested.
4384 */
4385 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004386 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004387 complete(&adapter->init_done);
4388 return 0;
4389 }
4390
John Allenc26eba02017-10-26 16:23:25 -05004391 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4392
Thomas Falcon032c5e82015-12-21 11:26:06 -06004393 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4394 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4395 netdev_dbg(adapter->netdev, "%016lx\n",
4396 ((unsigned long *)(adapter->login_rsp_buf))[i]);
4397 }
4398
4399 /* Sanity checks */
4400 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4401 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4402 adapter->req_rx_add_queues !=
4403 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4404 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4405 ibmvnic_remove(adapter->vdev);
4406 return -EIO;
4407 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004408 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4409 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4410 /* variable buffer sizes are not supported, so just read the
4411 * first entry.
4412 */
4413 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004414
4415 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4416 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4417
4418 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4419 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4420 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4421 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4422
4423 for (i = 0; i < num_tx_pools; i++)
4424 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4425
4426 for (i = 0; i < num_rx_pools; i++)
4427 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4428
Thomas Falcon507ebe62020-08-21 13:39:01 -05004429 adapter->num_active_tx_scrqs = num_tx_pools;
4430 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004431 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004432 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004433 complete(&adapter->init_done);
4434
Thomas Falcon032c5e82015-12-21 11:26:06 -06004435 return 0;
4436}
4437
Thomas Falcon032c5e82015-12-21 11:26:06 -06004438static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4439 struct ibmvnic_adapter *adapter)
4440{
4441 struct device *dev = &adapter->vdev->dev;
4442 long rc;
4443
4444 rc = crq->request_unmap_rsp.rc.code;
4445 if (rc)
4446 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4447}
4448
4449static void handle_query_map_rsp(union ibmvnic_crq *crq,
4450 struct ibmvnic_adapter *adapter)
4451{
4452 struct net_device *netdev = adapter->netdev;
4453 struct device *dev = &adapter->vdev->dev;
4454 long rc;
4455
4456 rc = crq->query_map_rsp.rc.code;
4457 if (rc) {
4458 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4459 return;
4460 }
4461 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4462 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4463 crq->query_map_rsp.free_pages);
4464}
4465
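/* Store one QUERY_CAPABILITY response in the matching adapter field.
 * When the last outstanding query completes, move on to requesting
 * capabilities via send_request_cap().
 */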
4466static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4467 struct ibmvnic_adapter *adapter)
4468{
4469 struct net_device *netdev = adapter->netdev;
4470 struct device *dev = &adapter->vdev->dev;
4471 long rc;
4472
Thomas Falcon901e0402017-02-15 12:17:59 -06004473 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004474 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004475 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004476 rc = crq->query_capability.rc.code;
4477 if (rc) {
4478 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4479 goto out;
4480 }
4481
4482 switch (be16_to_cpu(crq->query_capability.capability)) {
4483 case MIN_TX_QUEUES:
4484 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004485 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004486 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4487 adapter->min_tx_queues);
4488 break;
4489 case MIN_RX_QUEUES:
4490 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004491 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004492 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4493 adapter->min_rx_queues);
4494 break;
4495 case MIN_RX_ADD_QUEUES:
4496 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004497 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004498 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4499 adapter->min_rx_add_queues);
4500 break;
4501 case MAX_TX_QUEUES:
4502 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004503 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004504 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4505 adapter->max_tx_queues);
4506 break;
4507 case MAX_RX_QUEUES:
4508 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004509 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004510 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4511 adapter->max_rx_queues);
4512 break;
4513 case MAX_RX_ADD_QUEUES:
4514 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004515 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004516 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4517 adapter->max_rx_add_queues);
4518 break;
4519 case MIN_TX_ENTRIES_PER_SUBCRQ:
4520 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004521 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004522 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4523 adapter->min_tx_entries_per_subcrq);
4524 break;
4525 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4526 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004527 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004528 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4529 adapter->min_rx_add_entries_per_subcrq);
4530 break;
4531 case MAX_TX_ENTRIES_PER_SUBCRQ:
4532 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004533 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004534 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4535 adapter->max_tx_entries_per_subcrq);
4536 break;
4537 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4538 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004539 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004540 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4541 adapter->max_rx_add_entries_per_subcrq);
4542 break;
4543 case TCP_IP_OFFLOAD:
4544 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06004545 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004546 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4547 adapter->tcp_ip_offload);
4548 break;
4549 case PROMISC_SUPPORTED:
4550 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004551 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004552 netdev_dbg(netdev, "promisc_supported = %lld\n",
4553 adapter->promisc_supported);
4554 break;
4555 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004556 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004557 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004558 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4559 break;
4560 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004561 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004562 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004563 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4564 break;
4565 case MAX_MULTICAST_FILTERS:
4566 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004567 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004568 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4569 adapter->max_multicast_filters);
4570 break;
4571 case VLAN_HEADER_INSERTION:
4572 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004573 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004574 if (adapter->vlan_header_insertion)
4575 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4576 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4577 adapter->vlan_header_insertion);
4578 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004579 case RX_VLAN_HEADER_INSERTION:
4580 adapter->rx_vlan_header_insertion =
4581 be64_to_cpu(crq->query_capability.number);
4582 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4583 adapter->rx_vlan_header_insertion);
4584 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004585 case MAX_TX_SG_ENTRIES:
4586 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004587 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004588 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4589 adapter->max_tx_sg_entries);
4590 break;
4591 case RX_SG_SUPPORTED:
4592 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004593 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004594 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4595 adapter->rx_sg_supported);
4596 break;
4597 case OPT_TX_COMP_SUB_QUEUES:
4598 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004599 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004600 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4601 adapter->opt_tx_comp_sub_queues);
4602 break;
4603 case OPT_RX_COMP_QUEUES:
4604 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004605 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004606 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4607 adapter->opt_rx_comp_queues);
4608 break;
4609 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4610 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004611 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004612 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4613 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4614 break;
4615 case OPT_TX_ENTRIES_PER_SUBCRQ:
4616 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004617 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004618 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4619 adapter->opt_tx_entries_per_subcrq);
4620 break;
4621 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4622 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004623 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004624 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4625 adapter->opt_rxba_entries_per_subcrq);
4626 break;
4627 case TX_RX_DESC_REQ:
4628 adapter->tx_rx_desc_req = crq->query_capability.number;
4629 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4630 adapter->tx_rx_desc_req);
4631 break;
4632
4633 default:
4634 netdev_err(netdev, "Got invalid cap rsp %d\n",
4635 crq->query_capability.capability);
4636 }
4637
4638out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004639 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4640 adapter->wait_capability = false;
Lijun Pan09081b92020-09-27 20:13:27 -05004641 send_request_cap(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004642 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004643}
4644
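/* Synchronously query the physical port parameters (speed and duplex).
 * fw_lock serializes users of the shared fw_done completion; -EIO is
 * returned when the response carries an error code.
 */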
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004645static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4646{
4647 union ibmvnic_crq crq;
4648 int rc;
4649
4650 memset(&crq, 0, sizeof(crq));
4651 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4652 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004653
4654 mutex_lock(&adapter->fw_lock);
4655 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06004656 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004657
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004658 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004659 if (rc) {
4660 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004661 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004662 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004663
4664 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004665 if (rc) {
4666 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06004667 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004668 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004669
Thomas Falconff25dcb2019-11-25 17:12:56 -06004670 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004671 return adapter->fw_done_rc ? -EIO : 0;
4672}
4673
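/* Map the IBMVNIC_*BPS speed bits and duplex flags from the response onto
 * the generic ethtool SPEED_ and DUPLEX_ values.
 */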
4674static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4675 struct ibmvnic_adapter *adapter)
4676{
4677 struct net_device *netdev = adapter->netdev;
4678 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004679 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004680
4681 rc = crq->query_phys_parms_rsp.rc.code;
4682 if (rc) {
4683 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4684 return rc;
4685 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004686 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004687 case IBMVNIC_10MBPS:
4688 adapter->speed = SPEED_10;
4689 break;
4690 case IBMVNIC_100MBPS:
4691 adapter->speed = SPEED_100;
4692 break;
4693 case IBMVNIC_1GBPS:
4694 adapter->speed = SPEED_1000;
4695 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05004696 case IBMVNIC_10GBPS:
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004697 adapter->speed = SPEED_10000;
4698 break;
4699 case IBMVNIC_25GBPS:
4700 adapter->speed = SPEED_25000;
4701 break;
4702 case IBMVNIC_40GBPS:
4703 adapter->speed = SPEED_40000;
4704 break;
4705 case IBMVNIC_50GBPS:
4706 adapter->speed = SPEED_50000;
4707 break;
4708 case IBMVNIC_100GBPS:
4709 adapter->speed = SPEED_100000;
4710 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05004711 case IBMVNIC_200GBPS:
4712 adapter->speed = SPEED_200000;
4713 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004714 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004715 if (netif_carrier_ok(netdev))
4716 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004717 adapter->speed = SPEED_UNKNOWN;
4718 }
4719 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4720 adapter->duplex = DUPLEX_FULL;
4721 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4722 adapter->duplex = DUPLEX_HALF;
4723 else
4724 adapter->duplex = DUPLEX_UNKNOWN;
4725
4726 return rc;
4727}
4728
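/* Dispatch a single CRQ message. Transport events (partner init, failover,
 * migration) drive resets and carrier state; command responses are routed
 * to their handlers or complete the fw_done/stats_done/init_done waiters.
 */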
Thomas Falcon032c5e82015-12-21 11:26:06 -06004729static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4730 struct ibmvnic_adapter *adapter)
4731{
4732 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4733 struct net_device *netdev = adapter->netdev;
4734 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004735 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004736 long rc;
4737
4738 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004739 (unsigned long)be64_to_cpu(u64_crq[0]),
4740 (unsigned long)be64_to_cpu(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004741 switch (gen_crq->first) {
4742 case IBMVNIC_CRQ_INIT_RSP:
4743 switch (gen_crq->cmd) {
4744 case IBMVNIC_CRQ_INIT:
4745 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04004746 adapter->from_passive_init = true;
Thomas Falcon17c87052018-05-23 13:37:58 -05004747 if (!completion_done(&adapter->init_done)) {
4748 complete(&adapter->init_done);
4749 adapter->init_done_rc = -EIO;
4750 }
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004751 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004752 break;
4753 case IBMVNIC_CRQ_INIT_COMPLETE:
4754 dev_info(dev, "Partner initialization complete\n");
Thomas Falcon51536982018-05-23 13:37:56 -05004755 adapter->crq.active = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004756 send_version_xchg(adapter);
4757 break;
4758 default:
4759 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4760 }
4761 return;
4762 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04004763 netif_carrier_off(netdev);
Thomas Falcon51536982018-05-23 13:37:56 -05004764 adapter->crq.active = false;
Thomas Falcon2147e3d2019-11-25 17:12:54 -06004765 /* terminate any thread waiting for a response
4766 * from the device
4767 */
4768 if (!completion_done(&adapter->fw_done)) {
4769 adapter->fw_done_rc = -EIO;
4770 complete(&adapter->fw_done);
4771 }
4772 if (!completion_done(&adapter->stats_done))
4773 complete(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04004774 if (test_bit(0, &adapter->resetting))
Thomas Falcon2770a792018-05-23 13:38:02 -05004775 adapter->force_reset_recovery = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004776 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04004777 dev_info(dev, "Migrated, re-enabling adapter\n");
4778 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05004779 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4780 dev_info(dev, "Backing device failover detected\n");
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004781 adapter->failover_pending = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004782 } else {
4783 /* The adapter lost the connection */
4784 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4785 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04004786 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004787 }
4788 return;
4789 case IBMVNIC_CRQ_CMD_RSP:
4790 break;
4791 default:
4792 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4793 gen_crq->first);
4794 return;
4795 }
4796
4797 switch (gen_crq->cmd) {
4798 case VERSION_EXCHANGE_RSP:
4799 rc = crq->version_exchange_rsp.rc.code;
4800 if (rc) {
4801 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4802 break;
4803 }
Thomas Falcon78468892020-05-28 11:19:17 -05004804 ibmvnic_version =
Thomas Falcon032c5e82015-12-21 11:26:06 -06004805 be16_to_cpu(crq->version_exchange_rsp.version);
Thomas Falcon78468892020-05-28 11:19:17 -05004806 dev_info(dev, "Partner protocol version is %d\n",
4807 ibmvnic_version);
Lijun Pan491099a2020-09-27 20:13:26 -05004808 send_query_cap(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004809 break;
4810 case QUERY_CAPABILITY_RSP:
4811 handle_query_cap_rsp(crq, adapter);
4812 break;
4813 case QUERY_MAP_RSP:
4814 handle_query_map_rsp(crq, adapter);
4815 break;
4816 case REQUEST_MAP_RSP:
Thomas Falconf3be0cb2017-06-21 14:53:01 -05004817 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4818 complete(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004819 break;
4820 case REQUEST_UNMAP_RSP:
4821 handle_request_unmap_rsp(crq, adapter);
4822 break;
4823 case REQUEST_CAPABILITY_RSP:
4824 handle_request_cap_rsp(crq, adapter);
4825 break;
4826 case LOGIN_RSP:
4827 netdev_dbg(netdev, "Got Login Response\n");
4828 handle_login_rsp(crq, adapter);
4829 break;
4830 case LOGICAL_LINK_STATE_RSP:
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004831 netdev_dbg(netdev,
4832 "Got Logical Link State Response, state: %d rc: %d\n",
4833 crq->logical_link_state_rsp.link_state,
4834 crq->logical_link_state_rsp.rc.code);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004835 adapter->logical_link_state =
4836 crq->logical_link_state_rsp.link_state;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004837 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4838 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004839 break;
4840 case LINK_STATE_INDICATION:
4841 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4842 adapter->phys_link_state =
4843 crq->link_state_indication.phys_link_state;
4844 adapter->logical_link_state =
4845 crq->link_state_indication.logical_link_state;
Thomas Falcon0655f992019-05-09 23:13:44 -05004846 if (adapter->phys_link_state && adapter->logical_link_state)
4847 netif_carrier_on(netdev);
4848 else
4849 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004850 break;
4851 case CHANGE_MAC_ADDR_RSP:
4852 netdev_dbg(netdev, "Got MAC address change Response\n");
Thomas Falconf8136142018-01-29 13:45:05 -06004853 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004854 break;
4855 case ERROR_INDICATION:
4856 netdev_dbg(netdev, "Got Error Indication\n");
4857 handle_error_indication(crq, adapter);
4858 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004859 case REQUEST_STATISTICS_RSP:
4860 netdev_dbg(netdev, "Got Statistics Response\n");
4861 complete(&adapter->stats_done);
4862 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004863 case QUERY_IP_OFFLOAD_RSP:
4864 netdev_dbg(netdev, "Got Query IP offload Response\n");
4865 handle_query_ip_offload_rsp(adapter);
4866 break;
4867 case MULTICAST_CTRL_RSP:
4868 netdev_dbg(netdev, "Got multicast control Response\n");
4869 break;
4870 case CONTROL_IP_OFFLOAD_RSP:
4871 netdev_dbg(netdev, "Got Control IP offload Response\n");
4872 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4873 sizeof(adapter->ip_offload_ctrl),
4874 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05004875 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004876 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004877 case COLLECT_FW_TRACE_RSP:
4878 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4879 complete(&adapter->fw_done);
4880 break;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004881 case GET_VPD_SIZE_RSP:
4882 handle_vpd_size_rsp(crq, adapter);
4883 break;
4884 case GET_VPD_RSP:
4885 handle_vpd_rsp(crq, adapter);
4886 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004887 case QUERY_PHYS_PARMS_RSP:
4888 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4889 complete(&adapter->fw_done);
4890 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004891 default:
4892 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4893 gen_crq->cmd);
4894 }
4895}
4896
4897static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4898{
4899 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06004900
Thomas Falcon6c267b32017-02-15 12:17:58 -06004901 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004902 return IRQ_HANDLED;
4903}
4904
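/* Drain the CRQ under queue->lock. While capability queries are still
 * outstanding (wait_capability), stay in the loop so their responses are
 * consumed in this same tasklet invocation.
 */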
Allen Paisaa7c3fe2020-09-14 12:59:29 +05304905static void ibmvnic_tasklet(struct tasklet_struct *t)
Thomas Falcon6c267b32017-02-15 12:17:58 -06004906{
Allen Paisaa7c3fe2020-09-14 12:59:29 +05304907 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004908 struct ibmvnic_crq_queue *queue = &adapter->crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004909 union ibmvnic_crq *crq;
4910 unsigned long flags;
4911 bool done = false;
4912
4913 spin_lock_irqsave(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004914 while (!done) {
4915 /* Pull all the valid messages off the CRQ */
4916 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4917 ibmvnic_handle_crq(crq, adapter);
4918 crq->generic.first = 0;
4919 }
Brian Kinged7ecbf2017-04-19 13:44:53 -04004920
4921 /* remain in tasklet until all
4922 * capability responses are received
4923 */
4924 if (!adapter->wait_capability)
4925 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004926 }
Thomas Falcon249168a2017-02-15 12:18:00 -06004927 /* if capability CRQs were sent in this tasklet, the next
4928 * tasklet run must wait until all responses are received
4929 */
4930 if (atomic_read(&adapter->running_cap_crqs) != 0)
4931 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004932 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004933}
4934
4935static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4936{
4937 struct vio_dev *vdev = adapter->vdev;
4938 int rc;
4939
4940 do {
4941 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4942 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4943
4944 if (rc)
4945 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4946
4947 return rc;
4948}
4949
4950static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4951{
4952 struct ibmvnic_crq_queue *crq = &adapter->crq;
4953 struct device *dev = &adapter->vdev->dev;
4954 struct vio_dev *vdev = adapter->vdev;
4955 int rc;
4956
4957 /* Close the CRQ */
4958 do {
4959 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4960 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4961
4962 /* Clean out the queue */
4963 memset(crq->msgs, 0, PAGE_SIZE);
4964 crq->cur = 0;
Thomas Falcon51536982018-05-23 13:37:56 -05004965 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004966
4967 /* And re-open it again */
4968 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4969 crq->msg_token, PAGE_SIZE);
4970
4971 if (rc == H_CLOSED)
4972 /* Adapter is good, but other end is not ready */
4973 dev_warn(dev, "Partner adapter not ready\n");
4974 else if (rc != 0)
4975 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4976
4977 return rc;
4978}
4979
Nathan Fontenotf9928872017-03-30 02:48:54 -04004980static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004981{
4982 struct ibmvnic_crq_queue *crq = &adapter->crq;
4983 struct vio_dev *vdev = adapter->vdev;
4984 long rc;
4985
Nathan Fontenotf9928872017-03-30 02:48:54 -04004986 if (!crq->msgs)
4987 return;
4988
Thomas Falcon032c5e82015-12-21 11:26:06 -06004989 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4990 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004991 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004992 do {
4993 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4994 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4995
4996 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4997 DMA_BIDIRECTIONAL);
4998 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04004999 crq->msgs = NULL;
Thomas Falcon51536982018-05-23 13:37:56 -05005000 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005001}
5002
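/* Allocate the single-page CRQ, register it with the hypervisor (falling
 * back to a reset if a prior kernel, e.g. after kexec, left it
 * registered), then hook up the servicing interrupt and tasklet.
 */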
Nathan Fontenotf9928872017-03-30 02:48:54 -04005003static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005004{
5005 struct ibmvnic_crq_queue *crq = &adapter->crq;
5006 struct device *dev = &adapter->vdev->dev;
5007 struct vio_dev *vdev = adapter->vdev;
5008 int rc, retrc = -ENOMEM;
5009
Nathan Fontenotf9928872017-03-30 02:48:54 -04005010 if (crq->msgs)
5011 return 0;
5012
Thomas Falcon032c5e82015-12-21 11:26:06 -06005013 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5014 /* Should we allocate more than one page? */
5015
5016 if (!crq->msgs)
5017 return -ENOMEM;
5018
5019 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5020 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5021 DMA_BIDIRECTIONAL);
5022 if (dma_mapping_error(dev, crq->msg_token))
5023 goto map_failed;
5024
5025 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5026 crq->msg_token, PAGE_SIZE);
5027
5028 if (rc == H_RESOURCE)
5029 /* maybe kexecing and resource is busy. try a reset */
5030 rc = ibmvnic_reset_crq(adapter);
5031 retrc = rc;
5032
5033 if (rc == H_CLOSED) {
5034 dev_warn(dev, "Partner adapter not ready\n");
5035 } else if (rc) {
5036 dev_warn(dev, "Error %d opening adapter\n", rc);
5037 goto reg_crq_failed;
5038 }
5039
5040 retrc = 0;
5041
Allen Paisaa7c3fe2020-09-14 12:59:29 +05305042 tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005043
Thomas Falcon032c5e82015-12-21 11:26:06 -06005044 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03005045 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5046 adapter->vdev->unit_address);
5047 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005048 if (rc) {
5049 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5050 vdev->irq, rc);
5051 goto req_irq_failed;
5052 }
5053
5054 rc = vio_enable_interrupts(vdev);
5055 if (rc) {
5056 dev_err(dev, "Error %d enabling interrupts\n", rc);
5057 goto req_irq_failed;
5058 }
5059
5060 crq->cur = 0;
5061 spin_lock_init(&crq->lock);
5062
5063 return retrc;
5064
5065req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06005066 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005067 do {
5068 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5069 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5070reg_crq_failed:
5071 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5072map_failed:
5073 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005074 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005075 return retrc;
5076}
5077
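/* Drive the CRQ init handshake and (re)build the sub-CRQs. During a reset
 * that changed the negotiated queue counts, the sub-CRQs are released and
 * re-allocated; otherwise the existing ones are simply reset.
 */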
Lijun Pan635e4422020-08-19 17:52:26 -05005078static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
John Allenf6ef6402017-03-17 17:13:42 -05005079{
5080 struct device *dev = &adapter->vdev->dev;
5081 unsigned long timeout = msecs_to_jiffies(30000);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005082 u64 old_num_rx_queues, old_num_tx_queues;
John Allenf6ef6402017-03-17 17:13:42 -05005083 int rc;
5084
John Allen017892c12017-05-26 10:30:19 -04005085 adapter->from_passive_init = false;
5086
Lijun Pan635e4422020-08-19 17:52:26 -05005087 if (reset) {
5088 old_num_rx_queues = adapter->req_rx_queues;
5089 old_num_tx_queues = adapter->req_tx_queues;
5090 reinit_completion(&adapter->init_done);
5091 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005092
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005093 adapter->init_done_rc = 0;
Lijun Panfa68bfa2020-08-19 17:52:24 -05005094 rc = ibmvnic_send_crq_init(adapter);
5095 if (rc) {
5096 dev_err(dev, "Send crq init failed with error %d\n", rc);
5097 return rc;
5098 }
5099
John Allenf6ef6402017-03-17 17:13:42 -05005100 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5101 dev_err(dev, "Initialization sequence timed out\n");
John Allen017892c12017-05-26 10:30:19 -04005102 return -1;
5103 }
5104
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005105 if (adapter->init_done_rc) {
5106 release_crq_queue(adapter);
5107 return adapter->init_done_rc;
5108 }
5109
Lijun Pan785a2b12020-09-17 21:12:46 -05005110 if (adapter->from_passive_init) {
5111 adapter->state = VNIC_OPEN;
5112 adapter->from_passive_init = false;
5113 return -1;
5114 }
5115
Lijun Pan635e4422020-08-19 17:52:26 -05005116 if (reset &&
5117 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05005118 adapter->reset_reason != VNIC_RESET_MOBILITY) {
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005119 if (adapter->req_rx_queues != old_num_rx_queues ||
5120 adapter->req_tx_queues != old_num_tx_queues) {
5121 release_sub_crqs(adapter, 0);
5122 rc = init_sub_crqs(adapter);
5123 } else {
5124 rc = reset_sub_crq_queues(adapter);
5125 }
5126 } else {
Nathan Fontenot57a49432017-05-26 10:31:12 -04005127 rc = init_sub_crqs(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005128 }
5129
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005130 if (rc) {
5131 dev_err(dev, "Initialization of sub crqs failed\n");
5132 release_crq_queue(adapter);
Thomas Falcon5df969c2017-06-28 19:55:54 -05005133 return rc;
5134 }
5135
5136 rc = init_sub_crq_irqs(adapter);
5137 if (rc) {
5138 dev_err(dev, "Failed to initialize sub crq irqs\n");
5139 release_crq_queue(adapter);
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005140 }
5141
5142 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05005143}
5144
Thomas Falcon40c9db82017-06-12 12:35:04 -05005145static struct device_attribute dev_attr_failover;
5146
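/* Probe: read the MAC address supplied by firmware, allocate the netdev,
 * bring up the CRQ and run the initial handshake (retrying while the
 * server reports EAGAIN), then register the netdev and the failover
 * sysfs attribute.
 */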
Thomas Falcon032c5e82015-12-21 11:26:06 -06005147static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5148{
5149 struct ibmvnic_adapter *adapter;
5150 struct net_device *netdev;
5151 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005152 int rc;
5153
5154 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5155 dev->unit_address);
5156
5157 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5158 VETH_MAC_ADDR, NULL);
5159 if (!mac_addr_p) {
5160 dev_err(&dev->dev,
5161 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5162 __FILE__, __LINE__);
5163 return 0;
5164 }
5165
5166 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
Thomas Falcond45cc3a2017-12-18 12:52:11 -06005167 IBMVNIC_MAX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005168 if (!netdev)
5169 return -ENOMEM;
5170
5171 adapter = netdev_priv(netdev);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005172 adapter->state = VNIC_PROBING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005173 dev_set_drvdata(&dev->dev, netdev);
5174 adapter->vdev = dev;
5175 adapter->netdev = netdev;
5176
5177 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5178 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5179 netdev->irq = dev->irq;
5180 netdev->netdev_ops = &ibmvnic_netdev_ops;
5181 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5182 SET_NETDEV_DEV(netdev, &dev->dev);
5183
5184 spin_lock_init(&adapter->stats_lock);
5185
Nathan Fontenoted651a12017-05-03 14:04:38 -04005186 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005187 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5188 __ibmvnic_delayed_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005189 INIT_LIST_HEAD(&adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06005190 spin_lock_init(&adapter->rwi_lock);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005191 spin_lock_init(&adapter->state_lock);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005192 mutex_init(&adapter->fw_lock);
Thomas Falconbbd669a2019-04-04 18:58:26 -05005193 init_completion(&adapter->init_done);
Thomas Falcon070eca92019-11-25 17:12:53 -06005194 init_completion(&adapter->fw_done);
5195 init_completion(&adapter->reset_done);
5196 init_completion(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005197 clear_bit(0, &adapter->resetting);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005198
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005199 do {
Nathan Fontenot30f79622018-04-06 18:37:06 -05005200 rc = init_crq_queue(adapter);
5201 if (rc) {
5202 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5203 rc);
5204 goto ibmvnic_init_fail;
5205 }
5206
Lijun Pan635e4422020-08-19 17:52:26 -05005207 rc = ibmvnic_reset_init(adapter, false);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005208 if (rc && rc != EAGAIN)
5209 goto ibmvnic_init_fail;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005210 } while (rc == EAGAIN);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005211
Thomas Falcon07184212018-05-16 15:49:05 -05005212 rc = init_stats_buffers(adapter);
5213 if (rc)
5214 goto ibmvnic_init_fail;
5215
5216 rc = init_stats_token(adapter);
5217 if (rc)
5218 goto ibmvnic_stats_fail;
5219
Thomas Falconf39f0d12017-02-14 10:22:59 -06005220 netdev->mtu = adapter->req_mtu - ETH_HLEN;
John Allenc26eba02017-10-26 16:23:25 -05005221 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5222 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005223
Thomas Falcon40c9db82017-06-12 12:35:04 -05005224 rc = device_create_file(&dev->dev, &dev_attr_failover);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005225 if (rc)
Thomas Falcon07184212018-05-16 15:49:05 -05005226 goto ibmvnic_dev_file_err;
Thomas Falcon40c9db82017-06-12 12:35:04 -05005227
Mick Tarsele876a8a2017-09-28 13:53:18 -07005228 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005229 rc = register_netdev(netdev);
5230 if (rc) {
5231 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005232 goto ibmvnic_register_fail;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005233 }
5234 dev_info(&dev->dev, "ibmvnic registered\n");
5235
Nathan Fontenot90c80142017-05-03 14:04:32 -04005236 adapter->state = VNIC_PROBED;
John Allenc26eba02017-10-26 16:23:25 -05005237
5238 adapter->wait_for_reset = false;
5239
Thomas Falcon032c5e82015-12-21 11:26:06 -06005240 return 0;
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005241
5242ibmvnic_register_fail:
5243 device_remove_file(&dev->dev, &dev_attr_failover);
5244
Thomas Falcon07184212018-05-16 15:49:05 -05005245ibmvnic_dev_file_err:
5246 release_stats_token(adapter);
5247
5248ibmvnic_stats_fail:
5249 release_stats_buffers(adapter);
5250
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005251ibmvnic_init_fail:
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005252 release_sub_crqs(adapter, 1);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005253 release_crq_queue(adapter);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005254 mutex_destroy(&adapter->fw_lock);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005255 free_netdev(netdev);
5256
5257 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005258}
5259
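/* Tear down in reverse probe order. VNIC_REMOVING is set under state_lock
 * so concurrent resets observe it; removal is refused with -EBUSY while a
 * reset is in flight.
 */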
5260static int ibmvnic_remove(struct vio_dev *dev)
5261{
5262 struct net_device *netdev = dev_get_drvdata(&dev->dev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005263 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005264 unsigned long flags;
5265
5266 spin_lock_irqsave(&adapter->state_lock, flags);
5267 if (adapter->state == VNIC_RESETTING) {
5268 spin_unlock_irqrestore(&adapter->state_lock, flags);
5269 return -EBUSY;
5270 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06005271
Nathan Fontenot90c80142017-05-03 14:04:32 -04005272 adapter->state = VNIC_REMOVING;
Juliet Kim7d7195a2020-03-10 09:23:58 -05005273 spin_unlock_irqrestore(&adapter->state_lock, flags);
5274
Thomas Falcon6954a9e2020-06-12 13:34:41 -05005275 flush_work(&adapter->ibmvnic_reset);
5276 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5277
Juliet Kima5681e22018-11-19 15:59:22 -06005278 rtnl_lock();
5279 unregister_netdevice(netdev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005280
5281 release_resources(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005282 release_sub_crqs(adapter, 1);
Nathan Fontenot37489052017-04-19 13:45:04 -04005283 release_crq_queue(adapter);
5284
Thomas Falcon53cc7722018-02-26 18:10:56 -06005285 release_stats_token(adapter);
5286 release_stats_buffers(adapter);
5287
Nathan Fontenot90c80142017-05-03 14:04:32 -04005288 adapter->state = VNIC_REMOVED;
5289
Juliet Kima5681e22018-11-19 15:59:22 -06005290 rtnl_unlock();
Thomas Falconff25dcb2019-11-25 17:12:56 -06005291 mutex_destroy(&adapter->fw_lock);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005292 device_remove_file(&dev->dev, &dev_attr_failover);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005293 free_netdev(netdev);
5294 dev_set_drvdata(&dev->dev, NULL);
5295
5296 return 0;
5297}
5298
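/* sysfs hook allowing an administrator to force a client-initiated
 * failover to the backup backing device. A minimal usage sketch (the
 * exact sysfs path depends on the platform; the vio unit address shown
 * is illustrative):
 *
 *	echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 *
 * The session token obtained via H_GET_SESSION_TOKEN is handed back with
 * H_SESSION_ERR_DETECTED, which signals the hypervisor to fail the
 * session over.
 */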
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

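/* Estimate the IO memory this device will want mapped, so the VIO bus
 * can reserve enough DMA entitlement for it: one page for the CRQ, an
 * IOMMU-page-aligned statistics buffer, four pages per sub-CRQ (tx and
 * rx), and every buffer of every active rx pool.  Before probe has
 * created the netdev none of those structures exist yet, so the static
 * IBMVNIC_IO_ENTITLEMENT_DEFAULT is used instead.
 */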
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
			IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

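/* PM resume callback: if the device was open when the partition was
 * suspended, schedule the CRQ tasklet so any messages that arrived in
 * the meantime are drained.
 */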
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

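/* Devices the driver binds to: the hypervisor presents vNIC adapters
 * to the partition with device type "network" and compatibility string
 * "IBM,vnic".
 */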
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table = ibmvnic_device_table,
	.probe = ibmvnic_probe,
	.remove = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name = ibmvnic_driver_name,
	.pm = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);