// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                   */
/* Copyright (C) 2014 IBM Corp.                                          */
/* Santiago Leon (santi_leon@yahoo.com)                                  */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/* John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, continuous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                             offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

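/**
 * h_reg_sub_crq - register a sub-CRQ with the hypervisor
 * @unit_address: unit address of the VNIC device
 * @token: DMA address of the queue to register
 * @length: length of the queue in bytes
 * @number: returned queue number assigned by the hypervisor
 * @irq: returned interrupt source number for the queue
 *
 * Wraps the H_REG_SUB_CRQ hcall and copies the returned queue number
 * and interrupt number out of the hcall return buffer.
 */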
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
        *number = retbuf[0];
        *irq = retbuf[1];

        return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
                                       struct completion *comp_done,
                                       unsigned long timeout)
{
        struct net_device *netdev;
        unsigned long div_timeout;
        u8 retry;

        netdev = adapter->netdev;
        retry = 5;
        div_timeout = msecs_to_jiffies(timeout / retry);
        while (true) {
                if (!adapter->crq.active) {
                        netdev_err(netdev, "Device down!\n");
                        return -ENODEV;
                }
                if (!retry--)
                        break;
                if (wait_for_completion_timeout(comp_done, div_timeout))
                        return 0;
        }
        netdev_err(netdev, "Operation timed out.\n");
        return -ETIMEDOUT;
}

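/**
 * alloc_long_term_buff - allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: requested buffer size in bytes
 *
 * Allocates a coherent DMA buffer, assigns it the next map id and sends
 * a map request so the VNIC server can use the buffer. Waits for the
 * firmware response and frees the buffer again on any failure.
 */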
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
        struct device *dev = &adapter->vdev->dev;
        int rc;

        ltb->size = size;
        ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
                                       GFP_KERNEL);

        if (!ltb->buff) {
                dev_err(dev, "Couldn't alloc long term buffer\n");
                return -ENOMEM;
        }
        ltb->map_id = adapter->map_id;
        adapter->map_id++;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);
        rc = send_request_map(adapter, ltb->addr,
                              ltb->size, ltb->map_id);
        if (rc) {
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev,
                        "Long term map request aborted or timed out,rc = %d\n",
                        rc);
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        if (adapter->fw_done_rc) {
                dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
                        adapter->fw_done_rc);
                dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
                mutex_unlock(&adapter->fw_lock);
                return -1;
        }
        mutex_unlock(&adapter->fw_lock);
        return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;

        if (!ltb->buff)
                return;

        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_MOBILITY)
                send_request_unmap(adapter, ltb->map_id);
        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

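/**
 * reset_long_term_buff - re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Zeroes the buffer contents and sends a new map request for it. If the
 * firmware rejects the request, the buffer is freed and reallocated at
 * the same size.
 */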
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;
        int rc;

        memset(ltb->buff, 0, ltb->size);

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;

        reinit_completion(&adapter->fw_done);
        rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_info(dev,
                         "Reset failed, long term map request timed out or aborted\n");
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        if (adapter->fw_done_rc) {
                dev_info(dev,
                         "Reset failed, attempting to free and reallocate buffer\n");
                free_long_term_buff(adapter, ltb);
                mutex_unlock(&adapter->fw_lock);
                return alloc_long_term_buff(adapter, ltb, ltb->size);
        }
        mutex_unlock(&adapter->fw_lock);
        return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_active_rx_pools; i++)
                adapter->rx_pool[i].active = 0;
}

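/**
 * replenish_rx_pool - supply the VNIC server with fresh receive buffers
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * For each available slot in the pool, allocates an skb, points the rx
 * buffer at the corresponding offset in the pool's long term mapped
 * buffer and notifies the server with an rx_add sub-CRQ descriptor. On
 * an hcall failure the slot is returned to the free map, and the pools
 * are deactivated if the queue is closed or a failover is pending.
 */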
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
        int count = pool->size - atomic_read(&pool->available);
        u64 handle = adapter->rx_scrq[pool->index]->handle;
        struct device *dev = &adapter->vdev->dev;
        int buffers_added = 0;
        unsigned long lpar_rc;
        union sub_crq sub_crq;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        unsigned char *dst;
        int shift = 0;
        int index;
        int i;

        if (!pool->active)
                return;

        for (i = 0; i < count; ++i) {
                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
                if (!skb) {
                        dev_err(dev, "Couldn't replenish rx buff\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                index = pool->free_map[pool->next_free];

                if (pool->rx_buff[index].skb)
                        dev_err(dev, "Inconsistent free_map!\n");

                /* Copy the skb to the long term mapped DMA buffer */
                offset = index * pool->buff_size;
                dst = pool->long_term_buff.buff + offset;
                memset(dst, 0, pool->buff_size);
                dma_addr = pool->long_term_buff.addr + offset;
                pool->rx_buff[index].data = dst;

                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
                pool->rx_buff[index].dma = dma_addr;
                pool->rx_buff[index].skb = skb;
                pool->rx_buff[index].pool_index = pool->index;
                pool->rx_buff[index].size = pool->buff_size;

                memset(&sub_crq, 0, sizeof(sub_crq));
                sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
                sub_crq.rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[index]);
                sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
                sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
                 * converted to big endian to prevent the last byte from being
                 * truncated.
                 */
#ifdef __LITTLE_ENDIAN__
                shift = 8;
#endif
                sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

                lpar_rc = send_subcrq(adapter, handle, &sub_crq);
                if (lpar_rc != H_SUCCESS)
                        goto failure;

                buffers_added++;
                adapter->replenish_add_buff_success++;
                pool->next_free = (pool->next_free + 1) % pool->size;
        }
        atomic_add(buffers_added, &pool->available);
        return;

failure:
        if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
                dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;

        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;
        atomic_add(buffers_added, &pool->available);

        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                /* Disable buffer pool replenishment and report carrier off if
                 * queue is closed or pending failover.
                 * Firmware guarantees that a signal will be sent to the
                 * driver, triggering a reset.
                 */
                deactivate_rx_pools(adapter);
                netif_carrier_off(adapter->netdev);
        }
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;
        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                if (adapter->rx_pool[i].active)
                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
        }
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->tx_stats_buffers);
        kfree(adapter->rx_stats_buffers);
        adapter->tx_stats_buffers = NULL;
        adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
        adapter->tx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_tx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->tx_stats_buffers)
                return -ENOMEM;

        adapter->rx_stats_buffers =
                                kcalloc(IBMVNIC_MAX_QUEUES,
                                        sizeof(struct ibmvnic_rx_queue_stats),
                                        GFP_KERNEL);
        if (!adapter->rx_stats_buffers)
                return -ENOMEM;

        return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;

        if (!adapter->stats_token)
                return;

        dma_unmap_single(dev, adapter->stats_token,
                         sizeof(struct ibmvnic_statistics),
                         DMA_FROM_DEVICE);
        adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t stok;

        stok = dma_map_single(dev, &adapter->stats,
                              sizeof(struct ibmvnic_statistics),
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, stok)) {
                dev_err(dev, "Couldn't map stats buffer\n");
                return -1;
        }

        adapter->stats_token = stok;
        netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
        return 0;
}

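/**
 * reset_rx_pools - return all rx pools to a clean, empty state
 * @adapter: private device data
 *
 * Re-registers (or reallocates, if the required buffer size has changed)
 * each pool's long term buffer, rebuilds the free map and clears the
 * buffer tracking structures so the pools can be refilled after a reset.
 */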
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        u64 buff_size;
        int rx_scrqs;
        int i, j, rc;

        buff_size = adapter->cur_rx_buf_sz;
        rx_scrqs = adapter->num_active_rx_pools;
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

                if (rx_pool->buff_size != buff_size) {
                        free_long_term_buff(adapter, &rx_pool->long_term_buff);
                        rx_pool->buff_size = buff_size;
                        rc = alloc_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff,
                                                  rx_pool->size *
                                                  rx_pool->buff_size);
                } else {
                        rc = reset_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff);
                }

                if (rc)
                        return rc;

                for (j = 0; j < rx_pool->size; j++)
                        rx_pool->free_map[j] = j;

                memset(rx_pool->rx_buff, 0,
                       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
                rx_pool->active = 1;
        }

        return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        int i, j;

        if (!adapter->rx_pool)
                return;

        for (i = 0; i < adapter->num_active_rx_pools; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

                kfree(rx_pool->free_map);
                free_long_term_buff(adapter, &rx_pool->long_term_buff);

                if (!rx_pool->rx_buff)
                        continue;

                for (j = 0; j < rx_pool->size; j++) {
                        if (rx_pool->rx_buff[j].skb) {
                                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                                rx_pool->rx_buff[j].skb = NULL;
                        }
                }

                kfree(rx_pool->rx_buff);
        }

        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
        adapter->num_active_rx_pools = 0;
}

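/**
 * init_rx_pools - allocate the receive buffer pools
 * @netdev: net device backed by this adapter
 *
 * Allocates one rx pool per active rx sub-CRQ, sized by the negotiated
 * number of rx add entries and the current rx buffer size, including the
 * free map, buffer tracking array and long term DMA buffer for each pool.
 */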
static int init_rx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_rx_pool *rx_pool;
        int rxadd_subcrqs;
        u64 buff_size;
        int i, j;

        rxadd_subcrqs = adapter->num_active_rx_scrqs;
        buff_size = adapter->cur_rx_buf_sz;

        adapter->rx_pool = kcalloc(rxadd_subcrqs,
                                   sizeof(struct ibmvnic_rx_pool),
                                   GFP_KERNEL);
        if (!adapter->rx_pool) {
                dev_err(dev, "Failed to allocate rx pools\n");
                return -1;
        }

        adapter->num_active_rx_pools = rxadd_subcrqs;

        for (i = 0; i < rxadd_subcrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev,
                           "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
                           i, adapter->req_rx_add_entries_per_subcrq,
                           buff_size);

                rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
                rx_pool->index = i;
                rx_pool->buff_size = buff_size;
                rx_pool->active = 1;

                rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
                                            GFP_KERNEL);
                if (!rx_pool->free_map) {
                        release_rx_pools(adapter);
                        return -1;
                }

                rx_pool->rx_buff = kcalloc(rx_pool->size,
                                           sizeof(struct ibmvnic_rx_buff),
                                           GFP_KERNEL);
                if (!rx_pool->rx_buff) {
                        dev_err(dev, "Couldn't alloc rx buffers\n");
                        release_rx_pools(adapter);
                        return -1;
                }

                if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                         rx_pool->size * rx_pool->buff_size)) {
                        release_rx_pools(adapter);
                        return -1;
                }

                for (j = 0; j < rx_pool->size; ++j)
                        rx_pool->free_map[j] = j;

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
        }

        return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
                             struct ibmvnic_tx_pool *tx_pool)
{
        int rc, i;

        rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
        if (rc)
                return rc;

        memset(tx_pool->tx_buff, 0,
               tx_pool->num_buffers *
               sizeof(struct ibmvnic_tx_buff));

        for (i = 0; i < tx_pool->num_buffers; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;

        return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
        int tx_scrqs;
        int i, rc;

        tx_scrqs = adapter->num_active_tx_pools;
        for (i = 0; i < tx_scrqs; i++) {
                rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
                if (rc)
                        return rc;
                rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
                if (rc)
                        return rc;
        }

        return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
        if (!adapter->vpd)
                return;

        kfree(adapter->vpd->buff);
        kfree(adapter->vpd);

        adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_tx_pool *tx_pool)
{
        kfree(tx_pool->tx_buff);
        kfree(tx_pool->free_map);
        free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->tx_pool)
                return;

        for (i = 0; i < adapter->num_active_tx_pools; i++) {
                release_one_tx_pool(adapter, &adapter->tx_pool[i]);
                release_one_tx_pool(adapter, &adapter->tso_pool[i]);
        }

        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
        kfree(adapter->tso_pool);
        adapter->tso_pool = NULL;
        adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
                            struct ibmvnic_tx_pool *tx_pool,
                            int num_entries, int buf_size)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int i;

        tx_pool->tx_buff = kcalloc(num_entries,
                                   sizeof(struct ibmvnic_tx_buff),
                                   GFP_KERNEL);
        if (!tx_pool->tx_buff)
                return -1;

        if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                 num_entries * buf_size))
                return -1;

        tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
        if (!tx_pool->free_map)
                return -1;

        for (i = 0; i < num_entries; i++)
                tx_pool->free_map[i] = i;

        tx_pool->consumer_index = 0;
        tx_pool->producer_index = 0;
        tx_pool->num_buffers = num_entries;
        tx_pool->buf_size = buf_size;

        return 0;
}

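/**
 * init_tx_pools - allocate the transmit buffer pools
 * @netdev: net device backed by this adapter
 *
 * Allocates a regular tx pool and a TSO pool for each active tx sub-CRQ.
 * Regular pools are sized by the negotiated entries per sub-CRQ and the
 * MTU, while TSO pools use the fixed IBMVNIC_TSO_BUFS / IBMVNIC_TSO_BUF_SZ
 * dimensions. All pools are released again if any allocation fails.
 */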
static int init_tx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int tx_subcrqs;
        int i, rc;

        tx_subcrqs = adapter->num_active_tx_scrqs;
        adapter->tx_pool = kcalloc(tx_subcrqs,
                                   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tx_pool)
                return -1;

        adapter->tso_pool = kcalloc(tx_subcrqs,
                                    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tso_pool)
                return -1;

        adapter->num_active_tx_pools = tx_subcrqs;

        for (i = 0; i < tx_subcrqs; i++) {
                rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
                                      adapter->req_tx_entries_per_subcrq,
                                      adapter->req_mtu + VLAN_HLEN);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
                }

                rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
                                      IBMVNIC_TSO_BUFS,
                                      IBMVNIC_TSO_BUF_SZ);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
                }
        }

        return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_enable(&adapter->napi[i]);

        adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi_enabled)
                return;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
                napi_disable(&adapter->napi[i]);
        }

        adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
        if (!adapter->napi)
                return -ENOMEM;

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
                netif_napi_add(adapter->netdev, &adapter->napi[i],
                               ibmvnic_poll, NAPI_POLL_WEIGHT);
        }

        adapter->num_active_rx_napi = adapter->req_rx_queues;
        return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
        int i;

        if (!adapter->napi)
                return;

        for (i = 0; i < adapter->num_active_rx_napi; i++) {
                netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
                netif_napi_del(&adapter->napi[i]);
        }

        kfree(adapter->napi);
        adapter->napi = NULL;
        adapter->num_active_rx_napi = 0;
        adapter->napi_enabled = false;
}

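/**
 * ibmvnic_login - log in to the VNIC server
 * @netdev: net device backed by this adapter
 *
 * Sends a login request and waits for the response. Retries on timeout,
 * on an aborted login, and on a partial success, in which case the
 * capabilities are renegotiated and the sub-CRQs reinitialized before
 * trying again. Gives up once the retry limit of ten is exceeded. On
 * success, reprograms the adapter MAC address.
 */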
static int ibmvnic_login(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        int retry_count = 0;
        int retries = 10;
        bool retry;
        int rc;

        do {
                retry = false;
                if (retry_count > retries) {
                        netdev_warn(netdev, "Login attempts exceeded\n");
                        return -1;
                }

                adapter->init_done_rc = 0;
                reinit_completion(&adapter->init_done);
                rc = send_login(adapter);
                if (rc) {
                        netdev_warn(netdev, "Unable to login\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_warn(netdev, "Login timed out, retrying...\n");
                        retry = true;
                        adapter->init_done_rc = 0;
                        retry_count++;
                        continue;
                }

                if (adapter->init_done_rc == ABORTED) {
                        netdev_warn(netdev, "Login aborted, retrying...\n");
                        retry = true;
                        adapter->init_done_rc = 0;
                        retry_count++;
                        /* FW or device may be busy, so
                         * wait a bit before retrying login
                         */
                        msleep(500);
                } else if (adapter->init_done_rc == PARTIALSUCCESS) {
                        retry_count++;
                        release_sub_crqs(adapter, 1);

                        retry = true;
                        netdev_dbg(netdev,
                                   "Received partial success, retrying...\n");
                        adapter->init_done_rc = 0;
                        reinit_completion(&adapter->init_done);
                        send_cap_queries(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout)) {
                                netdev_warn(netdev,
                                            "Capabilities query timed out\n");
                                return -1;
                        }

                        rc = init_sub_crqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ initialization failed\n");
                                return -1;
                        }

                        rc = init_sub_crq_irqs(adapter);
                        if (rc) {
                                netdev_warn(netdev,
                                            "SCRQ irq initialization failed\n");
                                return -1;
                        }
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Adapter login failed\n");
                        return -1;
                }
        } while (retry);

        __ibmvnic_set_mac(netdev, adapter->mac_addr);

        return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_buf);
        adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
        kfree(adapter->login_rsp_buf);
        adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
        release_vpd_data(adapter);

        release_tx_pools(adapter);
        release_rx_pools(adapter);

        release_napi(adapter);
        release_login_rsp_buffer(adapter);
}

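/**
 * set_link_state - request a logical link state change from the server
 * @adapter: private device data
 * @link_state: requested logical link state (e.g. IBMVNIC_LOGICAL_LNK_UP)
 *
 * Sends a LOGICAL_LINK_STATE CRQ command and waits for the response,
 * resending after a short delay if the server reports partial success.
 */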
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
        struct net_device *netdev = adapter->netdev;
        unsigned long timeout = msecs_to_jiffies(30000);
        union ibmvnic_crq crq;
        bool resend;
        int rc;

        netdev_dbg(netdev, "setting link state %d\n", link_state);

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = link_state;

        do {
                resend = false;

                reinit_completion(&adapter->init_done);
                rc = ibmvnic_send_crq(adapter, &crq);
                if (rc) {
                        netdev_err(netdev, "Failed to set link state\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_err(netdev, "timeout setting link state\n");
                        return -1;
                }

                if (adapter->init_done_rc == PARTIALSUCCESS) {
                        /* Partial success, delay and re-send */
                        mdelay(1000);
                        resend = true;
                } else if (adapter->init_done_rc) {
                        netdev_warn(netdev, "Unable to set link state, rc=%d\n",
                                    adapter->init_done_rc);
                        return adapter->init_done_rc;
                }
        } while (resend);

        return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
                   adapter->req_tx_queues, adapter->req_rx_queues);

        rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
        if (rc) {
                netdev_err(netdev, "failed to set the number of tx queues\n");
                return rc;
        }

        rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
        if (rc)
                netdev_err(netdev, "failed to set the number of rx queues\n");

        return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
        int len = 0;
        int rc;

        if (adapter->vpd->buff)
                len = adapter->vpd->len;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd_size.cmd = GET_VPD_SIZE;
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }
        mutex_unlock(&adapter->fw_lock);

        if (!adapter->vpd->len)
                return -ENODATA;

        if (!adapter->vpd->buff)
                adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
        else if (adapter->vpd->len != len)
                adapter->vpd->buff =
                        krealloc(adapter->vpd->buff,
                                 adapter->vpd->len, GFP_KERNEL);

        if (!adapter->vpd->buff) {
                dev_err(dev, "Could allocate VPD buffer\n");
                return -ENOMEM;
        }

        adapter->vpd->dma_addr =
                dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
                dev_err(dev, "Could not map VPD buffer\n");
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                return -ENOMEM;
        }

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        crq.get_vpd.first = IBMVNIC_CRQ_CMD;
        crq.get_vpd.cmd = GET_VPD;
        crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
        crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc) {
                dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
                kfree(adapter->vpd->buff);
                adapter->vpd->buff = NULL;
                mutex_unlock(&adapter->fw_lock);
                return rc;
        }

        mutex_unlock(&adapter->fw_lock);
        return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int rc;

        rc = set_real_num_queues(netdev);
        if (rc)
                return rc;

        adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
        if (!adapter->vpd)
                return -ENOMEM;

        /* Vital Product Data (VPD) */
        rc = ibmvnic_get_vpd(adapter);
        if (rc) {
                netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
                return rc;
        }

        adapter->map_id = 1;

        rc = init_napi(adapter);
        if (rc)
                return rc;

        send_map_query(adapter);

        rc = init_rx_pools(netdev);
        if (rc)
                return rc;

        rc = init_tx_pools(netdev);
        return rc;
}

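/**
 * __ibmvnic_open - bring the interface up once resources exist
 * @netdev: net device backed by this adapter
 *
 * Replenishes the rx pools, enables NAPI and the sub-CRQ interrupts,
 * asks the server to bring the logical link up and starts the tx queues.
 * On failure the NAPI instances are disabled and resources are released.
 */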
Nathan Fontenoted651a12017-05-03 14:04:38 -04001120static int __ibmvnic_open(struct net_device *netdev)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001121{
1122 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001123 enum vnic_state prev_state = adapter->state;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001124 int i, rc;
1125
Nathan Fontenot90c80142017-05-03 14:04:32 -04001126 adapter->state = VNIC_OPENING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001127 replenish_pools(adapter);
John Allend944c3d62017-05-26 10:30:13 -04001128 ibmvnic_napi_enable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001129
Thomas Falcon032c5e82015-12-21 11:26:06 -06001130 /* We're ready to receive frames, enable the sub-crq interrupts and
1131 * set the logical link state to up
1132 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001133 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001134 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001135 if (prev_state == VNIC_CLOSED)
1136 enable_irq(adapter->rx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001137 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001138 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001139
Nathan Fontenoted651a12017-05-03 14:04:38 -04001140 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001141 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001142 if (prev_state == VNIC_CLOSED)
1143 enable_irq(adapter->tx_scrq[i]->irq);
Thomas Falconf23e0642018-04-15 18:53:36 -05001144 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001145 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001146
Nathan Fontenot53da09e2017-04-21 15:39:04 -04001147 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001148 if (rc) {
1149 for (i = 0; i < adapter->req_rx_queues; i++)
1150 napi_disable(&adapter->napi[i]);
1151 release_resources(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001152 return rc;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001153 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001154
Nathan Fontenoted651a12017-05-03 14:04:38 -04001155 netif_tx_start_all_queues(netdev);
1156
1157 if (prev_state == VNIC_CLOSED) {
1158 for (i = 0; i < adapter->req_rx_queues; i++)
1159 napi_schedule(&adapter->napi[i]);
1160 }
1161
1162 adapter->state = VNIC_OPEN;
1163 return rc;
1164}
1165
1166static int ibmvnic_open(struct net_device *netdev)
1167{
1168 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
John Allen69d08dc2018-01-18 16:27:58 -06001169 int rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001170
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001171 /* If device failover is pending, just set device state and return.
1172 * Device operation will be handled by reset routine.
1173 */
1174 if (adapter->failover_pending) {
1175 adapter->state = VNIC_OPEN;
1176 return 0;
1177 }
1178
Nathan Fontenoted651a12017-05-03 14:04:38 -04001179 if (adapter->state != VNIC_CLOSED) {
1180 rc = ibmvnic_login(netdev);
Juliet Kima5681e22018-11-19 15:59:22 -06001181 if (rc)
Nathan Fontenoted651a12017-05-03 14:04:38 -04001182 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001183
1184 rc = init_resources(adapter);
1185 if (rc) {
1186 netdev_err(netdev, "failed to initialize resources\n");
1187 release_resources(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001188 return rc;
1189 }
1190 }
1191
1192 rc = __ibmvnic_open(netdev);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02001193
Nathan Fontenotbfc32f22017-05-03 14:04:26 -04001194 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001195}
1196
Thomas Falcond0869c02018-02-13 18:23:43 -06001197static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1198{
1199 struct ibmvnic_rx_pool *rx_pool;
Thomas Falcon637f81d2018-02-26 18:10:57 -06001200 struct ibmvnic_rx_buff *rx_buff;
Thomas Falcond0869c02018-02-13 18:23:43 -06001201 u64 rx_entries;
1202 int rx_scrqs;
1203 int i, j;
1204
1205 if (!adapter->rx_pool)
1206 return;
1207
Thomas Falcon660e3092018-04-20 14:25:32 -05001208 rx_scrqs = adapter->num_active_rx_pools;
Thomas Falcond0869c02018-02-13 18:23:43 -06001209 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1210
1211 /* Free any remaining skbs in the rx buffer pools */
1212 for (i = 0; i < rx_scrqs; i++) {
1213 rx_pool = &adapter->rx_pool[i];
Thomas Falcon637f81d2018-02-26 18:10:57 -06001214 if (!rx_pool || !rx_pool->rx_buff)
Thomas Falcond0869c02018-02-13 18:23:43 -06001215 continue;
1216
1217 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1218 for (j = 0; j < rx_entries; j++) {
Thomas Falcon637f81d2018-02-26 18:10:57 -06001219 rx_buff = &rx_pool->rx_buff[j];
1220 if (rx_buff && rx_buff->skb) {
1221 dev_kfree_skb_any(rx_buff->skb);
1222 rx_buff->skb = NULL;
Thomas Falcond0869c02018-02-13 18:23:43 -06001223 }
1224 }
1225 }
1226}
1227
Thomas Falcone9e1e972018-03-16 20:00:30 -05001228static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1229 struct ibmvnic_tx_pool *tx_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001230{
Thomas Falcon637f81d2018-02-26 18:10:57 -06001231 struct ibmvnic_tx_buff *tx_buff;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001232 u64 tx_entries;
Thomas Falcone9e1e972018-03-16 20:00:30 -05001233 int i;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001234
Dan Carpenter050e85c2018-03-23 14:36:15 +03001235 if (!tx_pool || !tx_pool->tx_buff)
Thomas Falcone9e1e972018-03-16 20:00:30 -05001236 return;
1237
1238 tx_entries = tx_pool->num_buffers;
1239
1240 for (i = 0; i < tx_entries; i++) {
1241 tx_buff = &tx_pool->tx_buff[i];
1242 if (tx_buff && tx_buff->skb) {
1243 dev_kfree_skb_any(tx_buff->skb);
1244 tx_buff->skb = NULL;
1245 }
1246 }
1247}
1248
1249static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1250{
1251 int tx_scrqs;
1252 int i;
1253
1254 if (!adapter->tx_pool || !adapter->tso_pool)
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001255 return;
1256
Thomas Falcon660e3092018-04-20 14:25:32 -05001257 tx_scrqs = adapter->num_active_tx_pools;
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001258
1259 /* Free any remaining skbs in the tx buffer pools */
1260 for (i = 0; i < tx_scrqs; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001261 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
Thomas Falcone9e1e972018-03-16 20:00:30 -05001262 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1263 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
Nathan Fontenotb41b83e2017-05-03 14:04:56 -04001264 }
1265}
1266
John Allen6095e592018-03-30 13:44:21 -05001267static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
John Allenea5509f2017-03-17 17:13:43 -05001268{
John Allen6095e592018-03-30 13:44:21 -05001269 struct net_device *netdev = adapter->netdev;
John Allenea5509f2017-03-17 17:13:43 -05001270 int i;
1271
Nathan Fontenot46293b92017-05-03 14:05:02 -04001272 if (adapter->tx_scrq) {
1273 for (i = 0; i < adapter->req_tx_queues; i++)
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001274 if (adapter->tx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001275 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001276 "Disabling tx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001277 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001278 disable_irq(adapter->tx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001279 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001280 }
1281
Nathan Fontenot46293b92017-05-03 14:05:02 -04001282 if (adapter->rx_scrq) {
1283 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001284 if (adapter->rx_scrq[i]->irq) {
Thomas Falconf8738662018-03-07 17:51:45 -06001285 netdev_dbg(netdev,
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001286 "Disabling rx_scrq[%d] irq\n", i);
Thomas Falconf23e0642018-04-15 18:53:36 -05001287 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
Nathan Fontenot46293b92017-05-03 14:05:02 -04001288 disable_irq(adapter->rx_scrq[i]->irq);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001289 }
Nathan Fontenot46293b92017-05-03 14:05:02 -04001290 }
1291 }
John Allen6095e592018-03-30 13:44:21 -05001292}
1293
1294static void ibmvnic_cleanup(struct net_device *netdev)
1295{
1296 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1297
1298 /* ensure that transmissions are stopped if called by do_reset */
Juliet Kim7ed5b312019-09-20 16:11:23 -04001299 if (test_bit(0, &adapter->resetting))
John Allen6095e592018-03-30 13:44:21 -05001300 netif_tx_disable(netdev);
1301 else
1302 netif_tx_stop_all_queues(netdev);
1303
1304 ibmvnic_napi_disable(adapter);
1305 ibmvnic_disable_irqs(adapter);
1306
Thomas Falcond0869c02018-02-13 18:23:43 -06001307 clean_rx_pools(adapter);
Thomas Falcon10f76212017-05-26 10:30:31 -04001308 clean_tx_pools(adapter);
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001309}
1310
1311static int __ibmvnic_close(struct net_device *netdev)
1312{
1313 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1314 int rc = 0;
1315
1316 adapter->state = VNIC_CLOSING;
1317 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1318 if (rc)
1319 return rc;
Nathan Fontenot90c80142017-05-03 14:04:32 -04001320 adapter->state = VNIC_CLOSED;
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001321 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001322}
1323
Nathan Fontenoted651a12017-05-03 14:04:38 -04001324static int ibmvnic_close(struct net_device *netdev)
1325{
1326 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1327 int rc;
1328
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001329 /* If device failover is pending, just set device state and return.
1330 * Device operation will be handled by reset routine.
1331 */
1332 if (adapter->failover_pending) {
1333 adapter->state = VNIC_CLOSED;
1334 return 0;
1335 }
1336
Nathan Fontenoted651a12017-05-03 14:04:38 -04001337 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001338 ibmvnic_cleanup(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001339
1340 return rc;
1341}
1342
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to be filled in
 * @hdr_data: buffer to copy the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 *
 * Return: total length of the header data copied into @hdr_data.
 */
1354static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1355 int *hdr_len, u8 *hdr_data)
1356{
1357 int len = 0;
1358 u8 *hdr;
1359
Thomas Falconda75e3b2018-03-12 11:51:02 -05001360 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1361 hdr_len[0] = sizeof(struct vlan_ethhdr);
1362 else
1363 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001364
1365 if (skb->protocol == htons(ETH_P_IP)) {
1366 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1367 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1368 hdr_len[2] = tcp_hdrlen(skb);
1369 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1370 hdr_len[2] = sizeof(struct udphdr);
1371 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1372 hdr_len[1] = sizeof(struct ipv6hdr);
1373 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1374 hdr_len[2] = tcp_hdrlen(skb);
1375 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1376 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001377 } else if (skb->protocol == htons(ETH_P_ARP)) {
1378 hdr_len[1] = arp_hdr_len(skb->dev);
1379 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001380 }
1381
1382 memset(hdr_data, 0, 120);
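	/* hdr_field uses one bit per header layer: bit 6 requests the L2
	 * (MAC) header, bit 5 the L3 (network) header and bit 4 the L4
	 * (transport) header.  Copy each requested header into hdr_data.
	 */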
1383 if ((hdr_field >> 6) & 1) {
1384 hdr = skb_mac_header(skb);
1385 memcpy(hdr_data, hdr, hdr_len[0]);
1386 len += hdr_len[0];
1387 }
1388
1389 if ((hdr_field >> 5) & 1) {
1390 hdr = skb_network_header(skb);
1391 memcpy(hdr_data + len, hdr, hdr_len[1]);
1392 len += hdr_len[1];
1393 }
1394
1395 if ((hdr_field >> 4) & 1) {
1396 hdr = skb_transport_header(skb);
1397 memcpy(hdr_data + len, hdr, hdr_len[2]);
1398 len += hdr_len[2];
1399 }
1400 return len;
1401}
1402
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr.
 *
 * Return: the number of descriptors written to @scrq_arr.
 */
1414
Thomas Falcon2de09682017-10-16 10:02:11 -05001415static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1416 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001417{
1418 union sub_crq hdr_desc;
1419 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001420 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001421 u8 *data, *cur;
1422 int tmp;
1423
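	/* The first descriptor carries up to 24 bytes of header data plus
	 * the individual L2/L3/L4 lengths; any remainder is spread across
	 * header extension descriptors of up to 29 bytes each.
	 */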
1424 while (tmp_len > 0) {
1425 cur = hdr_data + len - tmp_len;
1426
1427 memset(&hdr_desc, 0, sizeof(hdr_desc));
1428 if (cur != hdr_data) {
1429 data = hdr_desc.hdr_ext.data;
1430 tmp = tmp_len > 29 ? 29 : tmp_len;
1431 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1432 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1433 hdr_desc.hdr_ext.len = tmp;
1434 } else {
1435 data = hdr_desc.hdr.data;
1436 tmp = tmp_len > 24 ? 24 : tmp_len;
1437 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1438 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1439 hdr_desc.hdr.len = tmp;
1440 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1441 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1442 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1443 hdr_desc.hdr.flag = hdr_field << 1;
1444 }
1445 memcpy(data, cur, tmp);
1446 tmp_len -= tmp;
1447 *scrq_arr = hdr_desc;
1448 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001449 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001450 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001451
1452 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001453}
1454
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer containing the skb and the indirect descriptor array
 * @num_entries: number of descriptors to be sent, updated with the number
 *		 of header descriptors added
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
1465
1466static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1467 int *num_entries, u8 hdr_field)
1468{
1469 int hdr_len[3] = {0, 0, 0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001470 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001471 u8 *hdr_data = txbuff->hdr_data;
1472
1473 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1474 txbuff->hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001475 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconad7775d2016-04-01 17:20:34 -05001476 txbuff->indir_arr + 1);
1477}
1478
Thomas Falcon1f247a62018-03-12 11:51:04 -05001479static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1480 struct net_device *netdev)
1481{
1482 /* For some backing devices, mishandling of small packets
1483 * can result in a loss of connection or TX stall. Device
1484 * architects recommend that no packet should be smaller
1485 * than the minimum MTU value provided to the driver, so
1486 * pad any packets to that length
1487 */
1488 if (skb->len < netdev->min_mtu)
1489 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001490
1491 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001492}
1493
YueHaibing94b2bb22018-09-18 14:35:47 +08001494static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001495{
1496 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1497 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001498 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001499 struct device *dev = &adapter->vdev->dev;
1500 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001501 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001502 struct ibmvnic_tx_pool *tx_pool;
1503 unsigned int tx_send_failed = 0;
1504 unsigned int tx_map_failed = 0;
1505 unsigned int tx_dropped = 0;
1506 unsigned int tx_packets = 0;
1507 unsigned int tx_bytes = 0;
1508 dma_addr_t data_dma_addr;
1509 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001510 unsigned long lpar_rc;
1511 union sub_crq tx_crq;
1512 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001513 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001514 unsigned char *dst;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001515 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001516 u8 proto = 0;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001517 u64 handle;
YueHaibing94b2bb22018-09-18 14:35:47 +08001518 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001519
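	/* Packets arriving while a reset is in progress are dropped; the
	 * subqueue stays stopped until the reset path reopens the device.
	 */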
Juliet Kim7ed5b312019-09-20 16:11:23 -04001520 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001521 if (!netif_subqueue_stopped(netdev, skb))
1522 netif_stop_subqueue(netdev, queue_num);
1523 dev_kfree_skb_any(skb);
1524
Thomas Falcon032c5e82015-12-21 11:26:06 -06001525 tx_send_failed++;
1526 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001527 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001528 goto out;
1529 }
1530
Thomas Falcon7083a452018-03-12 21:05:26 -05001531 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001532 tx_dropped++;
1533 tx_send_failed++;
1534 ret = NETDEV_TX_OK;
1535 goto out;
1536 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001537 if (skb_is_gso(skb))
1538 tx_pool = &adapter->tso_pool[queue_num];
1539 else
1540 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001541
Nathan Fontenot161b8a82017-05-03 14:05:08 -04001542 tx_scrq = adapter->tx_scrq[queue_num];
1543 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001544 handle = tx_scrq->handle;
Nathan Fontenot161b8a82017-05-03 14:05:08 -04001545
Thomas Falcon032c5e82015-12-21 11:26:06 -06001546 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001547
Thomas Falcon86b61a52018-03-16 20:00:29 -05001548 if (index == IBMVNIC_INVALID_MAP) {
1549 dev_kfree_skb_any(skb);
1550 tx_send_failed++;
1551 tx_dropped++;
1552 ret = NETDEV_TX_OK;
1553 goto out;
1554 }
1555
1556 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1557
Thomas Falcon06b3e352018-03-16 20:00:28 -05001558 offset = index * tx_pool->buf_size;
1559 dst = tx_pool->long_term_buff.buff + offset;
1560 memset(dst, 0, tx_pool->buf_size);
1561 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001562
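	/* Copy the frame into the reserved slot of the long-term mapped
	 * buffer; data_dma_addr already points at this slot, so no
	 * per-packet DMA mapping is needed for the frame data.
	 */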
Thomas Falcon15482052017-10-17 12:36:54 -05001563 if (skb_shinfo(skb)->nr_frags) {
1564 int cur, i;
1565
1566 /* Copy the head */
1567 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1568 cur = skb_headlen(skb);
1569
1570 /* Copy the frags */
1571 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1572 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1573
1574 memcpy(dst + cur,
1575 page_address(skb_frag_page(frag)) +
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07001576 skb_frag_off(frag), skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001577 cur += skb_frag_size(frag);
1578 }
1579 } else {
1580 skb_copy_from_linear_data(skb, dst, skb->len);
1581 }
1582
Thomas Falcon032c5e82015-12-21 11:26:06 -06001583 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001584 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001585
1586 tx_buff = &tx_pool->tx_buff[index];
1587 tx_buff->skb = skb;
1588 tx_buff->data_dma[0] = data_dma_addr;
1589 tx_buff->data_len[0] = skb->len;
1590 tx_buff->index = index;
1591 tx_buff->pool_index = queue_num;
1592 tx_buff->last_frag = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001593
1594 memset(&tx_crq, 0, sizeof(tx_crq));
1595 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1596 tx_crq.v1.type = IBMVNIC_TX_DESC;
1597 tx_crq.v1.n_crq_elem = 1;
1598 tx_crq.v1.n_sge = 1;
1599 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001600
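	/* TSO frames come from the separate tso_pool, so tag the correlator
	 * with IBMVNIC_TSO_POOL_MASK so that completion handling can tell
	 * which pool the buffer index refers to.
	 */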
Thomas Falconfdb06102017-10-17 12:36:55 -05001601 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001602 tx_crq.v1.correlator =
1603 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001604 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001605 tx_crq.v1.correlator = cpu_to_be32(index);
1606 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001607 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1608 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1609
Michał Mirosławe84b4792018-11-07 17:50:52 +01001610 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001611 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1612 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1613 }
1614
1615 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06001616 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1617 proto = ip_hdr(skb)->protocol;
1618 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1619 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1620 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001621 }
1622
Thomas Falcona0dca102018-01-18 19:29:48 -06001623 if (proto == IPPROTO_TCP)
1624 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1625 else if (proto == IPPROTO_UDP)
1626 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1627
Thomas Falconad7775d2016-04-01 17:20:34 -05001628 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001629 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05001630 hdrs += 2;
1631 }
Thomas Falconfdb06102017-10-17 12:36:55 -05001632 if (skb_is_gso(skb)) {
1633 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1634 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1635 hdrs += 2;
1636 }
Thomas Falconad7775d2016-04-01 17:20:34 -05001637 /* determine if l2/3/4 headers are sent to firmware */
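	/* If bit 7 of the selected tx_rx_desc_req byte is set, the firmware
	 * requires the packet headers as separate descriptors.  In that case
	 * the header descriptors are appended to indir_arr and the whole
	 * array is submitted with a single indirect sub-CRQ call; otherwise
	 * one descriptor is enough.
	 */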
John Allen2fa56a42018-02-09 13:19:46 -06001638 if ((*hdrs >> 7) & 1) {
Thomas Falconad7775d2016-04-01 17:20:34 -05001639 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1640 tx_crq.v1.n_crq_elem = num_entries;
Thomas Falconecba6162018-02-26 18:10:55 -06001641 tx_buff->num_entries = num_entries;
Thomas Falconad7775d2016-04-01 17:20:34 -05001642 tx_buff->indir_arr[0] = tx_crq;
1643 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1644 sizeof(tx_buff->indir_arr),
1645 DMA_TO_DEVICE);
1646 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001647 dev_kfree_skb_any(skb);
1648 tx_buff->skb = NULL;
Thomas Falconad7775d2016-04-01 17:20:34 -05001649 if (!firmware_has_feature(FW_FEATURE_CMO))
1650 dev_err(dev, "tx: unable to map descriptor array\n");
1651 tx_map_failed++;
1652 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001653 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001654 goto tx_err_out;
Thomas Falconad7775d2016-04-01 17:20:34 -05001655 }
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001656 lpar_rc = send_subcrq_indirect(adapter, handle,
Thomas Falconad7775d2016-04-01 17:20:34 -05001657 (u64)tx_buff->indir_dma,
1658 (u64)num_entries);
Thomas Falcon80f0fe02019-08-14 14:57:05 -05001659 dma_unmap_single(dev, tx_buff->indir_dma,
1660 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
Thomas Falconad7775d2016-04-01 17:20:34 -05001661 } else {
Thomas Falconecba6162018-02-26 18:10:55 -06001662 tx_buff->num_entries = num_entries;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05001663 lpar_rc = send_subcrq(adapter, handle,
John Allen498cd8e2016-04-06 11:49:55 -05001664 &tx_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -05001665 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001666 if (lpar_rc != H_SUCCESS) {
Thomas Falcon2d14d372018-07-13 12:03:32 -05001667 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1668 dev_err_ratelimited(dev, "tx: send failed\n");
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001669 dev_kfree_skb_any(skb);
1670 tx_buff->skb = NULL;
1671
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001672 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1673 /* Disable TX and report carrier off if queue is closed
1674 * or pending failover.
Thomas Falconb8c80b82017-05-26 10:30:42 -04001675 * Firmware guarantees that a signal will be sent to the
1676 * driver, triggering a reset or some other action.
1677 */
1678 netif_tx_stop_all_queues(netdev);
1679 netif_carrier_off(netdev);
1680 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001681
Thomas Falcon032c5e82015-12-21 11:26:06 -06001682 tx_send_failed++;
1683 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001684 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001685 goto tx_err_out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001686 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001687
Thomas Falconffc385b2018-02-18 10:08:41 -06001688 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04001689 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06001690 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001691 netif_stop_subqueue(netdev, queue_num);
1692 }
1693
Thomas Falcon032c5e82015-12-21 11:26:06 -06001694 tx_packets++;
1695 tx_bytes += skb->len;
1696 txq->trans_start = jiffies;
1697 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001698 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001699
Thomas Falcon86b61a52018-03-16 20:00:29 -05001700tx_err_out:
	/* roll back consumer index and map array */
1702 if (tx_pool->consumer_index == 0)
1703 tx_pool->consumer_index =
1704 tx_pool->num_buffers - 1;
1705 else
1706 tx_pool->consumer_index--;
1707 tx_pool->free_map[tx_pool->consumer_index] = index;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001708out:
1709 netdev->stats.tx_dropped += tx_dropped;
1710 netdev->stats.tx_bytes += tx_bytes;
1711 netdev->stats.tx_packets += tx_packets;
1712 adapter->tx_send_failed += tx_send_failed;
1713 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05001714 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1715 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1716 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001717
1718 return ret;
1719}
1720
1721static void ibmvnic_set_multi(struct net_device *netdev)
1722{
1723 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1724 struct netdev_hw_addr *ha;
1725 union ibmvnic_crq crq;
1726
1727 memset(&crq, 0, sizeof(crq));
1728 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1729 crq.request_capability.cmd = REQUEST_CAPABILITY;
1730
1731 if (netdev->flags & IFF_PROMISC) {
1732 if (!adapter->promisc_supported)
1733 return;
1734 } else {
1735 if (netdev->flags & IFF_ALLMULTI) {
1736 /* Accept all multicast */
1737 memset(&crq, 0, sizeof(crq));
1738 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1739 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1740 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1741 ibmvnic_send_crq(adapter, &crq);
1742 } else if (netdev_mc_empty(netdev)) {
1743 /* Reject all multicast */
1744 memset(&crq, 0, sizeof(crq));
1745 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1746 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1747 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1748 ibmvnic_send_crq(adapter, &crq);
1749 } else {
1750 /* Accept one or more multicast(s) */
1751 netdev_for_each_mc_addr(ha, netdev) {
1752 memset(&crq, 0, sizeof(crq));
1753 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1754 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1755 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1756 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1757 ha->addr);
1758 ibmvnic_send_crq(adapter, &crq);
1759 }
1760 }
1761 }
1762}
1763
Thomas Falcon62740e92019-05-09 23:13:43 -05001764static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001765{
1766 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001767 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001768 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001769
Thomas Falcon62740e92019-05-09 23:13:43 -05001770 if (!is_valid_ether_addr(dev_addr)) {
1771 rc = -EADDRNOTAVAIL;
1772 goto err;
1773 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001774
1775 memset(&crq, 0, sizeof(crq));
1776 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1777 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05001778 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06001779
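	/* fw_lock serializes commands that wait on the fw_done completion;
	 * the response handler signals fw_done and sets fw_done_rc.
	 */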
Thomas Falconff25dcb2019-11-25 17:12:56 -06001780 mutex_lock(&adapter->fw_lock);
1781 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06001782 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001783
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001784 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05001785 if (rc) {
1786 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001787 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001788 goto err;
1789 }
1790
Thomas Falcon476d96c2019-11-25 17:12:55 -06001791 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001792 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06001793 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05001794 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001795 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001796 goto err;
1797 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06001798 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001799 return 0;
1800err:
1801 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1802 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001803}
1804
John Allenc26eba02017-10-26 16:23:25 -05001805static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1806{
1807 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1808 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06001809 int rc;
John Allenc26eba02017-10-26 16:23:25 -05001810
Thomas Falcon62740e92019-05-09 23:13:43 -05001811 rc = 0;
1812 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1813 if (adapter->state != VNIC_PROBED)
1814 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
John Allenc26eba02017-10-26 16:23:25 -05001815
Thomas Falconf8136142018-01-29 13:45:05 -06001816 return rc;
John Allenc26eba02017-10-26 16:23:25 -05001817}
1818
/* do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
1823static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1824 struct ibmvnic_rwi *rwi,
1825 u32 reset_state)
1826{
1827 struct net_device *netdev = adapter->netdev;
1828 int i, rc;
1829
1830 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1831 rwi->reset_reason);
1832
1833 netif_carrier_off(netdev);
1834 adapter->reset_reason = rwi->reset_reason;
1835
1836 ibmvnic_cleanup(netdev);
1837
1838 if (reset_state == VNIC_OPEN) {
1839 rc = __ibmvnic_close(netdev);
1840 if (rc)
1841 return rc;
1842 }
1843
1844 release_resources(adapter);
1845 release_sub_crqs(adapter, 1);
1846 release_crq_queue(adapter);
1847
1848 adapter->state = VNIC_PROBED;
1849
1850 rc = init_crq_queue(adapter);
1851
1852 if (rc) {
1853 netdev_err(adapter->netdev,
1854 "Couldn't initialize crq. rc=%d\n", rc);
1855 return rc;
1856 }
1857
Lijun Pan635e4422020-08-19 17:52:26 -05001858 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04001859 if (rc)
1860 return IBMVNIC_INIT_FAILED;
1861
1862 /* If the adapter was in PROBE state prior to the reset,
1863 * exit here.
1864 */
1865 if (reset_state == VNIC_PROBED)
1866 return 0;
1867
1868 rc = ibmvnic_login(netdev);
1869 if (rc) {
1870 adapter->state = reset_state;
1871 return rc;
1872 }
1873
1874 rc = init_resources(adapter);
1875 if (rc)
1876 return rc;
1877
1878 ibmvnic_disable_irqs(adapter);
1879
1880 adapter->state = VNIC_CLOSED;
1881
1882 if (reset_state == VNIC_CLOSED)
1883 return 0;
1884
1885 rc = __ibmvnic_open(netdev);
1886 if (rc)
1887 return IBMVNIC_OPEN_FAILED;
1888
1889 /* refresh device's multicast list */
1890 ibmvnic_set_multi(netdev);
1891
1892 /* kick napi */
1893 for (i = 0; i < adapter->req_rx_queues; i++)
1894 napi_schedule(&adapter->napi[i]);
1895
1896 return 0;
1897}
1898
/* do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
1903static int do_reset(struct ibmvnic_adapter *adapter,
1904 struct ibmvnic_rwi *rwi, u32 reset_state)
1905{
John Allen896d8692018-01-18 16:26:31 -06001906 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06001907 u64 old_num_rx_slots, old_num_tx_slots;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001908 struct net_device *netdev = adapter->netdev;
1909 int i, rc;
1910
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001911 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1912 rwi->reset_reason);
1913
Juliet Kimb27507b2019-09-20 16:11:22 -04001914 rtnl_lock();
1915
Nathan Fontenoted651a12017-05-03 14:04:38 -04001916 netif_carrier_off(netdev);
1917 adapter->reset_reason = rwi->reset_reason;
1918
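	/* Remember the current queue counts and ring sizes; if the new login
	 * negotiates different values, the pools, napi contexts and VPD data
	 * are released and reallocated further down.
	 */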
John Allen896d8692018-01-18 16:26:31 -06001919 old_num_rx_queues = adapter->req_rx_queues;
1920 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06001921 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1922 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06001923
Nathan Fontenot30f79622018-04-06 18:37:06 -05001924 ibmvnic_cleanup(netdev);
1925
Thomas Falcon1f946082019-06-07 16:03:53 -05001926 if (reset_state == VNIC_OPEN &&
1927 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05001928 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Juliet Kimb27507b2019-09-20 16:11:22 -04001929 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001930
Juliet Kimb27507b2019-09-20 16:11:22 -04001931 /* Release the RTNL lock before link state change and
1932 * re-acquire after the link state change to allow
1933 * linkwatch_event to grab the RTNL lock and run during
1934 * a reset.
1935 */
1936 rtnl_unlock();
1937 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1938 rtnl_lock();
1939 if (rc)
1940 goto out;
1941
1942 if (adapter->state != VNIC_CLOSING) {
1943 rc = -1;
1944 goto out;
1945 }
1946
1947 adapter->state = VNIC_CLOSED;
John Allenc26eba02017-10-26 16:23:25 -05001948 }
1949
John Allen8cb31cf2017-05-26 10:30:37 -04001950 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1951 /* remove the closed state so when we call open it appears
1952 * we are coming from the probed state.
1953 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001954 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04001955
Juliet Kimb27507b2019-09-20 16:11:22 -04001956 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05001957 rc = ibmvnic_reenable_crq_queue(adapter);
1958 release_sub_crqs(adapter, 1);
1959 } else {
1960 rc = ibmvnic_reset_crq(adapter);
Dany Madden8b40eb732020-06-18 15:24:13 -04001961 if (rc == H_CLOSED || rc == H_SUCCESS) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05001962 rc = vio_enable_interrupts(adapter->vdev);
Dany Madden8b40eb732020-06-18 15:24:13 -04001963 if (rc)
1964 netdev_err(adapter->netdev,
1965 "Reset failed to enable interrupts. rc=%d\n",
1966 rc);
1967 }
Nathan Fontenot30f79622018-04-06 18:37:06 -05001968 }
1969
1970 if (rc) {
1971 netdev_err(adapter->netdev,
Dany Madden8b40eb732020-06-18 15:24:13 -04001972 "Reset couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04001973 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05001974 }
1975
Lijun Pan635e4422020-08-19 17:52:26 -05001976 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04001977 if (rc) {
1978 rc = IBMVNIC_INIT_FAILED;
1979 goto out;
1980 }
John Allen8cb31cf2017-05-26 10:30:37 -04001981
1982 /* If the adapter was in PROBE state prior to the reset,
1983 * exit here.
1984 */
Juliet Kimb27507b2019-09-20 16:11:22 -04001985 if (reset_state == VNIC_PROBED) {
1986 rc = 0;
1987 goto out;
1988 }
John Allen8cb31cf2017-05-26 10:30:37 -04001989
1990 rc = ibmvnic_login(netdev);
1991 if (rc) {
John Allen3578a7e2018-07-16 10:29:30 -05001992 adapter->state = reset_state;
Juliet Kimb27507b2019-09-20 16:11:22 -04001993 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04001994 }
1995
Juliet Kimb27507b2019-09-20 16:11:22 -04001996 if (adapter->req_rx_queues != old_num_rx_queues ||
1997 adapter->req_tx_queues != old_num_tx_queues ||
1998 adapter->req_rx_add_entries_per_subcrq !=
1999 old_num_rx_slots ||
2000 adapter->req_tx_entries_per_subcrq !=
2001 old_num_tx_slots) {
John Allen896d8692018-01-18 16:26:31 -06002002 release_rx_pools(adapter);
2003 release_tx_pools(adapter);
Juliet Kima5681e22018-11-19 15:59:22 -06002004 release_napi(adapter);
2005 release_vpd_data(adapter);
2006
2007 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002008 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002009 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002010
John Allenc26eba02017-10-26 16:23:25 -05002011 } else {
2012 rc = reset_tx_pools(adapter);
2013 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002014 goto out;
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002015
John Allenc26eba02017-10-26 16:23:25 -05002016 rc = reset_rx_pools(adapter);
2017 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002018 goto out;
John Allenc26eba02017-10-26 16:23:25 -05002019 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002020 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002021 }
John Allene676d812018-03-14 10:41:29 -05002022 adapter->state = VNIC_CLOSED;
2023
Juliet Kimb27507b2019-09-20 16:11:22 -04002024 if (reset_state == VNIC_CLOSED) {
2025 rc = 0;
2026 goto out;
2027 }
John Allene676d812018-03-14 10:41:29 -05002028
Nathan Fontenoted651a12017-05-03 14:04:38 -04002029 rc = __ibmvnic_open(netdev);
2030 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002031 rc = IBMVNIC_OPEN_FAILED;
2032 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002033 }
2034
Thomas Falconbe32a242019-06-07 16:03:54 -05002035 /* refresh device's multicast list */
2036 ibmvnic_set_multi(netdev);
2037
Nathan Fontenoted651a12017-05-03 14:04:38 -04002038 /* kick napi */
2039 for (i = 0; i < adapter->req_rx_queues; i++)
2040 napi_schedule(&adapter->napi[i]);
2041
Juliet Kimb27507b2019-09-20 16:11:22 -04002042 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
Thomas Falcon986103e2018-11-30 10:59:08 -06002043 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002044
Juliet Kimb27507b2019-09-20 16:11:22 -04002045 rc = 0;
2046
2047out:
2048 rtnl_unlock();
2049
2050 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002051}
2052
Thomas Falcon2770a792018-05-23 13:38:02 -05002053static int do_hard_reset(struct ibmvnic_adapter *adapter,
2054 struct ibmvnic_rwi *rwi, u32 reset_state)
2055{
2056 struct net_device *netdev = adapter->netdev;
2057 int rc;
2058
2059 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2060 rwi->reset_reason);
2061
2062 netif_carrier_off(netdev);
2063 adapter->reset_reason = rwi->reset_reason;
2064
2065 ibmvnic_cleanup(netdev);
2066 release_resources(adapter);
2067 release_sub_crqs(adapter, 0);
2068 release_crq_queue(adapter);
2069
2070 /* remove the closed state so when we call open it appears
2071 * we are coming from the probed state.
2072 */
2073 adapter->state = VNIC_PROBED;
2074
Thomas Falconbbd669a2019-04-04 18:58:26 -05002075 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002076 rc = init_crq_queue(adapter);
2077 if (rc) {
2078 netdev_err(adapter->netdev,
2079 "Couldn't initialize crq. rc=%d\n", rc);
2080 return rc;
2081 }
2082
Lijun Pan635e4422020-08-19 17:52:26 -05002083 rc = ibmvnic_reset_init(adapter, false);
Thomas Falcon2770a792018-05-23 13:38:02 -05002084 if (rc)
2085 return rc;
2086
2087 /* If the adapter was in PROBE state prior to the reset,
2088 * exit here.
2089 */
2090 if (reset_state == VNIC_PROBED)
2091 return 0;
2092
2093 rc = ibmvnic_login(netdev);
2094 if (rc) {
2095 adapter->state = VNIC_PROBED;
2096 return 0;
2097 }
Juliet Kima5681e22018-11-19 15:59:22 -06002098
2099 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002100 if (rc)
2101 return rc;
2102
2103 ibmvnic_disable_irqs(adapter);
2104 adapter->state = VNIC_CLOSED;
2105
2106 if (reset_state == VNIC_CLOSED)
2107 return 0;
2108
2109 rc = __ibmvnic_open(netdev);
Juliet Kimb27507b2019-09-20 16:11:22 -04002110 if (rc)
2111 return IBMVNIC_OPEN_FAILED;
Thomas Falcon2770a792018-05-23 13:38:02 -05002112
Thomas Falcon2770a792018-05-23 13:38:02 -05002113 return 0;
2114}
2115
Nathan Fontenoted651a12017-05-03 14:04:38 -04002116static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2117{
2118 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002119 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002120
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002121 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002122
2123 if (!list_empty(&adapter->rwi_list)) {
2124 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2125 list);
2126 list_del(&rwi->list);
2127 } else {
2128 rwi = NULL;
2129 }
2130
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002131 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002132 return rwi;
2133}
2134
2135static void free_all_rwi(struct ibmvnic_adapter *adapter)
2136{
2137 struct ibmvnic_rwi *rwi;
2138
2139 rwi = get_next_rwi(adapter);
2140 while (rwi) {
2141 kfree(rwi);
2142 rwi = get_next_rwi(adapter);
2143 }
2144}
2145
2146static void __ibmvnic_reset(struct work_struct *work)
2147{
2148 struct ibmvnic_rwi *rwi;
2149 struct ibmvnic_adapter *adapter;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002150 bool saved_state = false;
2151 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002152 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05002153 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002154
2155 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002156
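	/* If another reset pass already holds the resetting bit, defer this
	 * one; the delayed work will re-enter __ibmvnic_reset later.
	 */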
Juliet Kim7ed5b312019-09-20 16:11:23 -04002157 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2158 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2159 IBMVNIC_RESET_DELAY);
2160 return;
2161 }
2162
Nathan Fontenoted651a12017-05-03 14:04:38 -04002163 rwi = get_next_rwi(adapter);
2164 while (rwi) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002165 spin_lock_irqsave(&adapter->state_lock, flags);
2166
Thomas Falcon36f10312019-08-27 11:10:04 -05002167 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002168 adapter->state == VNIC_REMOVED) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002169 spin_unlock_irqrestore(&adapter->state_lock, flags);
Juliet Kim1c2977c2019-09-05 17:30:01 -04002170 kfree(rwi);
2171 rc = EBUSY;
2172 break;
2173 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002174
Juliet Kim7d7195a2020-03-10 09:23:58 -05002175 if (!saved_state) {
2176 reset_state = adapter->state;
2177 adapter->state = VNIC_RESETTING;
2178 saved_state = true;
2179 }
2180 spin_unlock_irqrestore(&adapter->state_lock, flags);
2181
Juliet Kimb27507b2019-09-20 16:11:22 -04002182 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2183 /* CHANGE_PARAM requestor holds rtnl_lock */
2184 rc = do_change_param_reset(adapter, rwi, reset_state);
2185 } else if (adapter->force_reset_recovery) {
2186 /* Transport event occurred during previous reset */
2187 if (adapter->wait_for_reset) {
2188 /* Previous was CHANGE_PARAM; caller locked */
2189 adapter->force_reset_recovery = false;
2190 rc = do_hard_reset(adapter, rwi, reset_state);
2191 } else {
2192 rtnl_lock();
2193 adapter->force_reset_recovery = false;
2194 rc = do_hard_reset(adapter, rwi, reset_state);
2195 rtnl_unlock();
2196 }
Juliet Kimf9c6cea2020-04-30 13:22:11 -05002197 } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2198 adapter->from_passive_init)) {
Thomas Falcon2770a792018-05-23 13:38:02 -05002199 rc = do_reset(adapter, rwi, reset_state);
2200 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002201 kfree(rwi);
Juliet Kimb27507b2019-09-20 16:11:22 -04002202 if (rc == IBMVNIC_OPEN_FAILED) {
2203 if (list_empty(&adapter->rwi_list))
2204 adapter->state = VNIC_CLOSED;
2205 else
2206 adapter->state = reset_state;
2207 rc = 0;
2208 } else if (rc && rc != IBMVNIC_INIT_FAILED &&
Thomas Falcon2770a792018-05-23 13:38:02 -05002209 !adapter->force_reset_recovery)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002210 break;
2211
2212 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002213
2214 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2215 rwi->reset_reason == VNIC_RESET_MOBILITY))
2216 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002217 }
2218
John Allenc26eba02017-10-26 16:23:25 -05002219 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002220 adapter->reset_done_rc = rc;
2221 complete(&adapter->reset_done);
2222 }
2223
Nathan Fontenoted651a12017-05-03 14:04:38 -04002224 if (rc) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002225 netdev_dbg(adapter->netdev, "Reset failed\n");
Nathan Fontenoted651a12017-05-03 14:04:38 -04002226 free_all_rwi(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002227 }
Juliet Kim1c2977c2019-09-05 17:30:01 -04002228
Juliet Kim7ed5b312019-09-20 16:11:23 -04002229 clear_bit_unlock(0, &adapter->resetting);
2230}
2231
2232static void __ibmvnic_delayed_reset(struct work_struct *work)
2233{
2234 struct ibmvnic_adapter *adapter;
2235
2236 adapter = container_of(work, struct ibmvnic_adapter,
2237 ibmvnic_delayed_reset.work);
2238 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002239}
2240
Thomas Falconaf894d22018-04-06 18:37:04 -05002241static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2242 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002243{
Thomas Falcon2770a792018-05-23 13:38:02 -05002244 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002245 struct ibmvnic_rwi *rwi, *tmp;
2246 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002247 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002248 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002249
2250 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002251 adapter->state == VNIC_REMOVED ||
2252 adapter->failover_pending) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002253 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002254 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002255 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002256 }
2257
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002258 if (adapter->state == VNIC_PROBING) {
2259 netdev_warn(netdev, "Adapter reset during probe\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002260 ret = adapter->init_done_rc = EAGAIN;
2261 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002262 }
2263
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002264 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002265
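	/* Skip the request if a reset of the same type is already queued */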
2266 list_for_each(entry, &adapter->rwi_list) {
2267 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2268 if (tmp->reset_reason == reason) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002269 netdev_dbg(netdev, "Skipping matching reset\n");
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002270 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Thomas Falconaf894d22018-04-06 18:37:04 -05002271 ret = EBUSY;
2272 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002273 }
2274 }
2275
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002276 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002277 if (!rwi) {
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002278 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002279 ibmvnic_close(netdev);
Thomas Falconaf894d22018-04-06 18:37:04 -05002280 ret = ENOMEM;
2281 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002282 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002283 /* if we just received a transport event,
2284 * flush reset queue and process this reset
2285 */
2286 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2287 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2288 list_del(entry);
2289 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002290 rwi->reset_reason = reason;
2291 list_add_tail(&rwi->list, &adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002292 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002293 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002294 schedule_work(&adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002295
2296 return 0;
2297err:
Thomas Falconaf894d22018-04-06 18:37:04 -05002298 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002299}
2300
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05002301static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002302{
2303 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002304
Nathan Fontenoted651a12017-05-03 14:04:38 -04002305 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002306}
2307
2308static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2309 struct ibmvnic_rx_buff *rx_buff)
2310{
2311 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2312
2313 rx_buff->skb = NULL;
2314
2315 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2316 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2317
2318 atomic_dec(&pool->available);
2319}
2320
2321static int ibmvnic_poll(struct napi_struct *napi, int budget)
2322{
2323 struct net_device *netdev = napi->dev;
2324 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2325 int scrq_num = (int)(napi - adapter->napi);
2326 int frames_processed = 0;
Nathan Fontenot152ce472017-05-26 10:30:54 -04002327
Thomas Falcon032c5e82015-12-21 11:26:06 -06002328restart_poll:
2329 while (frames_processed < budget) {
2330 struct sk_buff *skb;
2331 struct ibmvnic_rx_buff *rx_buff;
2332 union sub_crq *next;
2333 u32 length;
2334 u16 offset;
2335 u8 flags = 0;
2336
Juliet Kim7ed5b312019-09-20 16:11:23 -04002337 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002338 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Thomas Falcon21ecba62017-06-14 23:50:09 -05002339 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2340 napi_complete_done(napi, frames_processed);
2341 return frames_processed;
2342 }
2343
Thomas Falcon032c5e82015-12-21 11:26:06 -06002344 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2345 break;
2346 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2347 rx_buff =
2348 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2349 rx_comp.correlator);
2350 /* do error checking */
2351 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002352 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2353 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002354 /* free the entry */
2355 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002356 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002357 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002358 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002359 } else if (!rx_buff->skb) {
2360 /* free the entry */
2361 next->rx_comp.first = 0;
2362 remove_buff_from_pool(adapter, rx_buff);
2363 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002364 }
2365
2366 length = be32_to_cpu(next->rx_comp.len);
2367 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2368 flags = next->rx_comp.flags;
2369 skb = rx_buff->skb;
2370 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2371 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002372
2373 /* VLAN Header has been stripped by the system firmware and
2374 * needs to be inserted by the driver
2375 */
2376 if (adapter->rx_vlan_header_insertion &&
2377 (flags & IBMVNIC_VLAN_STRIPPED))
2378 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2379 ntohs(next->rx_comp.vlan_tci));
2380
Thomas Falcon032c5e82015-12-21 11:26:06 -06002381 /* free the entry */
2382 next->rx_comp.first = 0;
2383 remove_buff_from_pool(adapter, rx_buff);
2384
2385 skb_put(skb, length);
2386 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002387 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002388
2389 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2390 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2391 skb->ip_summed = CHECKSUM_UNNECESSARY;
2392 }
2393
2394 length = skb->len;
2395 napi_gro_receive(napi, skb); /* send it up */
2396 netdev->stats.rx_packets++;
2397 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002398 adapter->rx_stats_buffers[scrq_num].packets++;
2399 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002400 frames_processed++;
2401 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002402
2403 if (adapter->state != VNIC_CLOSING)
2404 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002405
2406 if (frames_processed < budget) {
2407 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08002408 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002409 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2410 napi_reschedule(napi)) {
2411 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2412 goto restart_poll;
2413 }
2414 }
2415 return frames_processed;
2416}
2417
John Allenc26eba02017-10-26 16:23:25 -05002418static int wait_for_reset(struct ibmvnic_adapter *adapter)
2419{
Thomas Falconaf894d22018-04-06 18:37:04 -05002420 int rc, ret;
2421
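	/* Save the current settings so they can be restored if the
	 * requested change fails.
	 */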
John Allenc26eba02017-10-26 16:23:25 -05002422 adapter->fallback.mtu = adapter->req_mtu;
2423 adapter->fallback.rx_queues = adapter->req_rx_queues;
2424 adapter->fallback.tx_queues = adapter->req_tx_queues;
2425 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2426 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2427
Thomas Falcon070eca92019-11-25 17:12:53 -06002428 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002429 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002430 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002431
2432 if (rc) {
2433 ret = rc;
2434 goto out;
2435 }
2436 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2437 if (rc) {
2438 ret = -ENODEV;
2439 goto out;
2440 }
John Allenc26eba02017-10-26 16:23:25 -05002441
Thomas Falconaf894d22018-04-06 18:37:04 -05002442 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002443 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002444 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002445 adapter->desired.mtu = adapter->fallback.mtu;
2446 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2447 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2448 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2449 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2450
Thomas Falcon070eca92019-11-25 17:12:53 -06002451 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002452 adapter->wait_for_reset = true;
2453 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002454 if (rc) {
2455 ret = rc;
2456 goto out;
2457 }
2458 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2459 60000);
2460 if (rc) {
2461 ret = -ENODEV;
2462 goto out;
2463 }
John Allenc26eba02017-10-26 16:23:25 -05002464 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002465out:
John Allenc26eba02017-10-26 16:23:25 -05002466 adapter->wait_for_reset = false;
2467
Thomas Falconaf894d22018-04-06 18:37:04 -05002468 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002469}
2470
John Allen3a807b72017-06-06 16:55:52 -05002471static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2472{
John Allenc26eba02017-10-26 16:23:25 -05002473 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2474
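	/* The adapter tracks its MTU including the Ethernet header, so add
	 * ETH_HLEN to the requested L3 MTU before asking for the change.
	 */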
2475 adapter->desired.mtu = new_mtu + ETH_HLEN;
2476
2477 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05002478}
2479
Thomas Falconf10b09e2018-03-12 11:51:05 -05002480static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2481 struct net_device *dev,
2482 netdev_features_t features)
2483{
2484 /* Some backing hardware adapters can not
2485 * handle packets with a MSS less than 224
2486 * or with only one segment.
2487 */
2488 if (skb_is_gso(skb)) {
2489 if (skb_shinfo(skb)->gso_size < 224 ||
2490 skb_shinfo(skb)->gso_segs == 1)
2491 features &= ~NETIF_F_GSO_MASK;
2492 }
2493
2494 return features;
2495}
2496
Thomas Falcon032c5e82015-12-21 11:26:06 -06002497static const struct net_device_ops ibmvnic_netdev_ops = {
2498 .ndo_open = ibmvnic_open,
2499 .ndo_stop = ibmvnic_close,
2500 .ndo_start_xmit = ibmvnic_xmit,
2501 .ndo_set_rx_mode = ibmvnic_set_multi,
2502 .ndo_set_mac_address = ibmvnic_set_mac,
2503 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002504 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05002505 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05002506 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002507};
2508
2509/* ethtool functions */
2510
Philippe Reynes8a433792017-01-07 22:37:29 +01002511static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2512 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002513{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002514 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2515 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01002516
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002517 rc = send_query_phys_parms(adapter);
2518 if (rc) {
2519 adapter->speed = SPEED_UNKNOWN;
2520 adapter->duplex = DUPLEX_UNKNOWN;
2521 }
2522 cmd->base.speed = adapter->speed;
2523 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01002524 cmd->base.port = PORT_FIBRE;
2525 cmd->base.phy_address = 0;
2526 cmd->base.autoneg = AUTONEG_ENABLE;
2527
Thomas Falcon032c5e82015-12-21 11:26:06 -06002528 return 0;
2529}
2530
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002531static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002532 struct ethtool_drvinfo *info)
2533{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002534 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2535
Thomas Falcon032c5e82015-12-21 11:26:06 -06002536 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2537 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002538 strlcpy(info->fw_version, adapter->fw_version,
2539 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002540}
2541
2542static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2543{
2544 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2545
2546 return adapter->msg_enable;
2547}
2548
2549static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2550{
2551 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2552
2553 adapter->msg_enable = data;
2554}
2555
2556static u32 ibmvnic_get_link(struct net_device *netdev)
2557{
2558 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2559
2560 /* Don't need to send a query because we request a logical link up at
2561 * init and then we wait for link state indications
2562 */
2563 return adapter->logical_link_state;
2564}
2565
2566static void ibmvnic_get_ringparam(struct net_device *netdev,
2567 struct ethtool_ringparam *ring)
2568{
John Allenbc131b32017-08-02 16:46:30 -05002569 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2570
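	/* With the "use server maxes" private flag set, report the limits
	 * advertised by the VNIC server; otherwise report the driver's
	 * fixed maximum queue size.
	 */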
Thomas Falcon723ad912018-09-28 18:38:26 -05002571 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2572 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2573 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2574 } else {
2575 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2576 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2577 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002578 ring->rx_mini_max_pending = 0;
2579 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05002580 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2581 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002582 ring->rx_mini_pending = 0;
2583 ring->rx_jumbo_pending = 0;
2584}
2585
John Allenc26eba02017-10-26 16:23:25 -05002586static int ibmvnic_set_ringparam(struct net_device *netdev,
2587 struct ethtool_ringparam *ring)
2588{
2589 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002590 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002591
Thomas Falcon723ad912018-09-28 18:38:26 -05002592 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002593 adapter->desired.rx_entries = ring->rx_pending;
2594 adapter->desired.tx_entries = ring->tx_pending;
2595
Thomas Falcon723ad912018-09-28 18:38:26 -05002596 ret = wait_for_reset(adapter);
2597
2598 if (!ret &&
2599 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2600 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2601 netdev_info(netdev,
2602 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2603 ring->rx_pending, ring->tx_pending,
2604 adapter->req_rx_add_entries_per_subcrq,
2605 adapter->req_tx_entries_per_subcrq);
2606 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002607}
2608
John Allenc2dbeb62017-08-02 16:47:17 -05002609static void ibmvnic_get_channels(struct net_device *netdev,
2610 struct ethtool_channels *channels)
2611{
2612 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2613
Thomas Falcon723ad912018-09-28 18:38:26 -05002614 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2615 channels->max_rx = adapter->max_rx_queues;
2616 channels->max_tx = adapter->max_tx_queues;
2617 } else {
2618 channels->max_rx = IBMVNIC_MAX_QUEUES;
2619 channels->max_tx = IBMVNIC_MAX_QUEUES;
2620 }
2621
John Allenc2dbeb62017-08-02 16:47:17 -05002622 channels->max_other = 0;
2623 channels->max_combined = 0;
2624 channels->rx_count = adapter->req_rx_queues;
2625 channels->tx_count = adapter->req_tx_queues;
2626 channels->other_count = 0;
2627 channels->combined_count = 0;
2628}
2629
John Allenc26eba02017-10-26 16:23:25 -05002630static int ibmvnic_set_channels(struct net_device *netdev,
2631 struct ethtool_channels *channels)
2632{
2633 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002634 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002635
Thomas Falcon723ad912018-09-28 18:38:26 -05002636 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002637 adapter->desired.rx_queues = channels->rx_count;
2638 adapter->desired.tx_queues = channels->tx_count;
2639
Thomas Falcon723ad912018-09-28 18:38:26 -05002640 ret = wait_for_reset(adapter);
2641
2642 if (!ret &&
2643 (adapter->req_rx_queues != channels->rx_count ||
2644 adapter->req_tx_queues != channels->tx_count))
2645 netdev_info(netdev,
2646 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2647 channels->rx_count, channels->tx_count,
2648 adapter->req_rx_queues, adapter->req_tx_queues);
2649 return ret;
2650
John Allenc26eba02017-10-26 16:23:25 -05002651}
2652
Thomas Falcon032c5e82015-12-21 11:26:06 -06002653static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2654{
John Allen3d52b592017-08-02 16:44:14 -05002655 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002656 int i;
2657
Thomas Falcon723ad912018-09-28 18:38:26 -05002658 switch (stringset) {
2659 case ETH_SS_STATS:
2660 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2661 i++, data += ETH_GSTRING_LEN)
2662 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2663
2664 for (i = 0; i < adapter->req_tx_queues; i++) {
2665 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2666 data += ETH_GSTRING_LEN;
2667
2668 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2669 data += ETH_GSTRING_LEN;
2670
2671 snprintf(data, ETH_GSTRING_LEN,
2672 "tx%d_dropped_packets", i);
2673 data += ETH_GSTRING_LEN;
2674 }
2675
2676 for (i = 0; i < adapter->req_rx_queues; i++) {
2677 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2678 data += ETH_GSTRING_LEN;
2679
2680 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2681 data += ETH_GSTRING_LEN;
2682
2683 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2684 data += ETH_GSTRING_LEN;
2685 }
2686 break;
2687
2688 case ETH_SS_PRIV_FLAGS:
2689 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2690 strcpy(data + i * ETH_GSTRING_LEN,
2691 ibmvnic_priv_flags[i]);
2692 break;
2693 default:
Thomas Falcon032c5e82015-12-21 11:26:06 -06002694 return;
John Allen3d52b592017-08-02 16:44:14 -05002695 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002696}
2697
2698static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2699{
John Allen3d52b592017-08-02 16:44:14 -05002700 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2701
Thomas Falcon032c5e82015-12-21 11:26:06 -06002702 switch (sset) {
2703 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05002704 return ARRAY_SIZE(ibmvnic_stats) +
2705 adapter->req_tx_queues * NUM_TX_STATS +
2706 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon723ad912018-09-28 18:38:26 -05002707 case ETH_SS_PRIV_FLAGS:
2708 return ARRAY_SIZE(ibmvnic_priv_flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002709 default:
2710 return -EOPNOTSUPP;
2711 }
2712}
2713
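/* Fetch device statistics: send a REQUEST_STATISTICS CRQ pointing at the
 * pre-mapped stats buffer, wait up to 10 seconds for the response, then
 * copy the global counters followed by the per-queue tx and rx counters.
 */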
2714static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2715 struct ethtool_stats *stats, u64 *data)
2716{
2717 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2718 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05002719 int i, j;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002720 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002721
2722 memset(&crq, 0, sizeof(crq));
2723 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2724 crq.request_statistics.cmd = REQUEST_STATISTICS;
2725 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2726 crq.request_statistics.len =
2727 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002728
2729 /* Wait for data to be written */
Thomas Falcon070eca92019-11-25 17:12:53 -06002730 reinit_completion(&adapter->stats_done);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002731 rc = ibmvnic_send_crq(adapter, &crq);
2732 if (rc)
2733 return;
Thomas Falcon476d96c2019-11-25 17:12:55 -06002734 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2735 if (rc)
2736 return;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002737
2738 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
John Allen52da5c12017-08-02 16:45:28 -05002739 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2740 ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05002741
2742 for (j = 0; j < adapter->req_tx_queues; j++) {
2743 data[i] = adapter->tx_stats_buffers[j].packets;
2744 i++;
2745 data[i] = adapter->tx_stats_buffers[j].bytes;
2746 i++;
2747 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2748 i++;
2749 }
2750
2751 for (j = 0; j < adapter->req_rx_queues; j++) {
2752 data[i] = adapter->rx_stats_buffers[j].packets;
2753 i++;
2754 data[i] = adapter->rx_stats_buffers[j].bytes;
2755 i++;
2756 data[i] = adapter->rx_stats_buffers[j].interrupts;
2757 i++;
2758 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002759}
2760
Thomas Falcon723ad912018-09-28 18:38:26 -05002761static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2762{
2763 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2764
2765 return adapter->priv_flags;
2766}
2767
2768static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2769{
2770 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2771 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2772
2773 if (which_maxes)
2774 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2775 else
2776 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2777
2778 return 0;
2779}
Thomas Falcon032c5e82015-12-21 11:26:06 -06002780static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002781 .get_drvinfo = ibmvnic_get_drvinfo,
2782 .get_msglevel = ibmvnic_get_msglevel,
2783 .set_msglevel = ibmvnic_set_msglevel,
2784 .get_link = ibmvnic_get_link,
2785 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05002786 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05002787 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05002788 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002789 .get_strings = ibmvnic_get_strings,
2790 .get_sset_count = ibmvnic_get_sset_count,
2791 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01002792 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon723ad912018-09-28 18:38:26 -05002793 .get_priv_flags = ibmvnic_get_priv_flags,
2794 .set_priv_flags = ibmvnic_set_priv_flags,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002795};
2796
2797/* Routines for managing CRQs/sCRQs */
2798
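/* Reset one sub-CRQ: release its irq mapping, clear the 4-page message
 * area and bookkeeping counters, then re-register the queue with the
 * hypervisor via h_reg_sub_crq.
 */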
Nathan Fontenot57a49432017-05-26 10:31:12 -04002799static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2800 struct ibmvnic_sub_crq_queue *scrq)
2801{
2802 int rc;
2803
2804 if (scrq->irq) {
2805 free_irq(scrq->irq, scrq);
2806 irq_dispose_mapping(scrq->irq);
2807 scrq->irq = 0;
2808 }
2809
Thomas Falconc8b2ad02017-06-14 23:50:07 -05002810 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
Thomas Falcon41f71462018-04-06 18:37:03 -05002811 atomic_set(&scrq->used, 0);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002812 scrq->cur = 0;
2813
2814 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2815 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2816 return rc;
2817}
2818
2819static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2820{
 2821	int i, rc = 0;
2822
2823 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002824 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002825 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2826 if (rc)
2827 return rc;
2828 }
2829
2830 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002831 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002832 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2833 if (rc)
2834 return rc;
2835 }
2836
Nathan Fontenot57a49432017-05-26 10:31:12 -04002837 return rc;
2838}
2839
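/* Tear down a sub-CRQ: optionally close it with H_FREE_SUB_CRQ (retrying
 * while the hypervisor reports busy), then unmap the DMA region and free
 * the message pages and the queue structure.
 */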
Thomas Falcon032c5e82015-12-21 11:26:06 -06002840static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002841 struct ibmvnic_sub_crq_queue *scrq,
2842 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002843{
2844 struct device *dev = &adapter->vdev->dev;
2845 long rc;
2846
2847 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2848
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002849 if (do_h_free) {
2850 /* Close the sub-crqs */
2851 do {
2852 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2853 adapter->vdev->unit_address,
2854 scrq->crq_num);
2855 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002856
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002857 if (rc) {
2858 netdev_err(adapter->netdev,
2859 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2860 scrq->crq_num, rc);
2861 }
Thomas Falconffa73852017-04-19 13:44:29 -04002862 }
2863
Thomas Falcon032c5e82015-12-21 11:26:06 -06002864 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2865 DMA_BIDIRECTIONAL);
2866 free_pages((unsigned long)scrq->msgs, 2);
2867 kfree(scrq);
2868}
2869
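/* Allocate and register a single sub-CRQ: four zeroed pages for the
 * message queue, DMA-mapped and registered with the hypervisor.
 * H_RESOURCE is handled by resetting the main CRQ first; H_CLOSED just
 * means the partner adapter is not ready yet.
 */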
2870static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2871 *adapter)
2872{
2873 struct device *dev = &adapter->vdev->dev;
2874 struct ibmvnic_sub_crq_queue *scrq;
2875 int rc;
2876
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002877 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002878 if (!scrq)
2879 return NULL;
2880
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04002881 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002882 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002883 if (!scrq->msgs) {
2884 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2885 goto zero_page_failed;
2886 }
2887
2888 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2889 DMA_BIDIRECTIONAL);
2890 if (dma_mapping_error(dev, scrq->msg_token)) {
2891 dev_warn(dev, "Couldn't map crq queue messages page\n");
2892 goto map_failed;
2893 }
2894
2895 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2896 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2897
2898 if (rc == H_RESOURCE)
2899 rc = ibmvnic_reset_crq(adapter);
2900
2901 if (rc == H_CLOSED) {
2902 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2903 } else if (rc) {
2904 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2905 goto reg_failed;
2906 }
2907
Thomas Falcon032c5e82015-12-21 11:26:06 -06002908 scrq->adapter = adapter;
2909 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002910 spin_lock_init(&scrq->lock);
2911
2912 netdev_dbg(adapter->netdev,
2913 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2914 scrq->crq_num, scrq->hw_irq, scrq->irq);
2915
2916 return scrq;
2917
Thomas Falcon032c5e82015-12-21 11:26:06 -06002918reg_failed:
2919 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2920 DMA_BIDIRECTIONAL);
2921map_failed:
2922 free_pages((unsigned long)scrq->msgs, 2);
2923zero_page_failed:
2924 kfree(scrq);
2925
2926 return NULL;
2927}
2928
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002929static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002930{
2931 int i;
2932
2933 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002934 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04002935 if (!adapter->tx_scrq[i])
2936 continue;
2937
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002938 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2939 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002940 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002941 free_irq(adapter->tx_scrq[i]->irq,
2942 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05002943 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002944 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002945 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04002946
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002947 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2948 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002949 }
2950
Nathan Fontenot9501df32017-03-15 23:38:07 -04002951 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002952 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002953 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002954 }
2955
2956 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002957 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04002958 if (!adapter->rx_scrq[i])
2959 continue;
2960
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002961 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2962 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002963 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002964 free_irq(adapter->rx_scrq[i]->irq,
2965 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05002966 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002967 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002968 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04002969
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002970 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2971 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002972 }
2973
Nathan Fontenot9501df32017-03-15 23:38:07 -04002974 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002975 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002976 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002977 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002978}
2979
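/* Sub-CRQ interrupts are masked/unmasked through H_VIOCTL
 * (H_DISABLE_VIO_INTERRUPT / H_ENABLE_VIO_INTERRUPT). On enable, an H_EOI
 * is issued first when recovering from a mobility reset so that an
 * interrupt left pending across the migration gets acknowledged.
 */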
2980static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2981 struct ibmvnic_sub_crq_queue *scrq)
2982{
2983 struct device *dev = &adapter->vdev->dev;
2984 unsigned long rc;
2985
2986 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2987 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2988 if (rc)
2989 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2990 scrq->hw_irq, rc);
2991 return rc;
2992}
2993
2994static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2995 struct ibmvnic_sub_crq_queue *scrq)
2996{
2997 struct device *dev = &adapter->vdev->dev;
2998 unsigned long rc;
2999
3000 if (scrq->hw_irq > 0x100000000ULL) {
3001 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3002 return 1;
3003 }
3004
Juliet Kim7ed5b312019-09-20 16:11:23 -04003005 if (test_bit(0, &adapter->resetting) &&
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003006 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Juliet Kim284f87d2019-11-20 10:50:03 -05003007 u64 val = (0xff000000) | scrq->hw_irq;
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003008
Juliet Kim284f87d2019-11-20 10:50:03 -05003009 rc = plpar_hcall_norets(H_EOI, val);
Juliet Kim2df5c602019-11-20 10:50:04 -05003010 /* H_EOI would fail with rc = H_FUNCTION when running
 3011		 * in XIVE mode, which is expected and not an error.
3012 */
3013 if (rc && (rc != H_FUNCTION))
Juliet Kim284f87d2019-11-20 10:50:03 -05003014 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3015 val, rc);
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003016 }
Thomas Falconf23e0642018-04-15 18:53:36 -05003017
Thomas Falcon032c5e82015-12-21 11:26:06 -06003018 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3019 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3020 if (rc)
3021 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3022 scrq->hw_irq, rc);
3023 return rc;
3024}
3025
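/* Process transmit completions on one sub-CRQ: return each completed
 * buffer to its tx (or TSO) pool, free the skb on the last fragment, and
 * wake the stopped netdev subqueue once the queue drains below half of
 * the requested entries.
 */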
3026static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3027 struct ibmvnic_sub_crq_queue *scrq)
3028{
3029 struct device *dev = &adapter->vdev->dev;
Thomas Falcon06b3e352018-03-16 20:00:28 -05003030 struct ibmvnic_tx_pool *tx_pool;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003031 struct ibmvnic_tx_buff *txbuff;
3032 union sub_crq *next;
3033 int index;
3034 int i, j;
3035
3036restart_loop:
3037 while (pending_scrq(adapter, scrq)) {
3038 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06003039 int num_entries = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003040
3041 next = ibmvnic_next_scrq(adapter, scrq);
3042 for (i = 0; i < next->tx_comp.num_comps; i++) {
3043 if (next->tx_comp.rcs[i]) {
3044 dev_err(dev, "tx error %x\n",
3045 next->tx_comp.rcs[i]);
3046 continue;
3047 }
3048 index = be32_to_cpu(next->tx_comp.correlators[i]);
Thomas Falcon06b3e352018-03-16 20:00:28 -05003049 if (index & IBMVNIC_TSO_POOL_MASK) {
3050 tx_pool = &adapter->tso_pool[pool];
3051 index &= ~IBMVNIC_TSO_POOL_MASK;
3052 } else {
3053 tx_pool = &adapter->tx_pool[pool];
3054 }
3055
3056 txbuff = &tx_pool->tx_buff[index];
Thomas Falcon032c5e82015-12-21 11:26:06 -06003057
3058 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
3059 if (!txbuff->data_dma[j])
3060 continue;
3061
3062 txbuff->data_dma[j] = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003063 }
3064
Thomas Falcon142c0ac2017-03-05 12:18:41 -06003065 if (txbuff->last_frag) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003066 dev_kfree_skb_any(txbuff->skb);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003067 txbuff->skb = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06003068 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003069
Thomas Falconffc385b2018-02-18 10:08:41 -06003070 num_entries += txbuff->num_entries;
3071
Thomas Falcon06b3e352018-03-16 20:00:28 -05003072 tx_pool->free_map[tx_pool->producer_index] = index;
3073 tx_pool->producer_index =
3074 (tx_pool->producer_index + 1) %
3075 tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003076 }
 3077		/* remove tx_comp scrq */
3078 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003079
Thomas Falconffc385b2018-02-18 10:08:41 -06003080 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003081 (adapter->req_tx_entries_per_subcrq / 2) &&
3082 __netif_subqueue_stopped(adapter->netdev,
3083 scrq->pool_index)) {
3084 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06003085 netdev_dbg(adapter->netdev, "Started queue %d\n",
3086 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003087 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003088 }
3089
3090 enable_scrq_irq(adapter, scrq);
3091
3092 if (pending_scrq(adapter, scrq)) {
3093 disable_scrq_irq(adapter, scrq);
3094 goto restart_loop;
3095 }
3096
3097 return 0;
3098}
3099
3100static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3101{
3102 struct ibmvnic_sub_crq_queue *scrq = instance;
3103 struct ibmvnic_adapter *adapter = scrq->adapter;
3104
3105 disable_scrq_irq(adapter, scrq);
3106 ibmvnic_complete_tx(adapter, scrq);
3107
3108 return IRQ_HANDLED;
3109}
3110
3111static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3112{
3113 struct ibmvnic_sub_crq_queue *scrq = instance;
3114 struct ibmvnic_adapter *adapter = scrq->adapter;
3115
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06003116 /* When booting a kdump kernel we can hit pending interrupts
3117 * prior to completing driver initialization.
3118 */
3119 if (unlikely(adapter->state != VNIC_OPEN))
3120 return IRQ_NONE;
3121
John Allen3d52b592017-08-02 16:44:14 -05003122 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3123
Thomas Falcon032c5e82015-12-21 11:26:06 -06003124 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3125 disable_scrq_irq(adapter, scrq);
3126 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3127 }
3128
3129 return IRQ_HANDLED;
3130}
3131
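/* Set up interrupts for all tx and rx sub-CRQs: map each hardware irq to
 * a Linux irq and request a per-queue named handler. On failure, irqs
 * registered so far are released and all sub-CRQs are freed.
 */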
Thomas Falconea22d512016-07-06 15:35:17 -05003132static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3133{
3134 struct device *dev = &adapter->vdev->dev;
3135 struct ibmvnic_sub_crq_queue *scrq;
3136 int i = 0, j = 0;
3137 int rc = 0;
3138
3139 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003140 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3141 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003142 scrq = adapter->tx_scrq[i];
3143 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3144
Michael Ellerman99c17902016-09-10 19:59:05 +10003145 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003146 rc = -EINVAL;
3147 dev_err(dev, "Error mapping irq\n");
3148 goto req_tx_irq_failed;
3149 }
3150
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003151 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3152 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003153 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003154 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003155
3156 if (rc) {
3157 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3158 scrq->irq, rc);
3159 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06003160 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05003161 }
3162 }
3163
3164 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003165 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3166 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003167 scrq = adapter->rx_scrq[i];
3168 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10003169 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003170 rc = -EINVAL;
3171 dev_err(dev, "Error mapping irq\n");
3172 goto req_rx_irq_failed;
3173 }
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003174 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3175 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003176 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003177 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003178 if (rc) {
3179 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3180 scrq->irq, rc);
3181 irq_dispose_mapping(scrq->irq);
3182 goto req_rx_irq_failed;
3183 }
3184 }
3185 return rc;
3186
3187req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003188 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003189 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3190 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003191 }
Thomas Falconea22d512016-07-06 15:35:17 -05003192 i = adapter->req_tx_queues;
3193req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003194 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003195 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
Thomas Falcon27a21452020-07-29 16:36:32 -05003196 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003197 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003198 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05003199 return rc;
3200}
3201
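/* Allocate the full set of tx + rx sub-CRQs. If fewer queues could be
 * registered than requested (but at least the required minimum), the
 * requested rx/tx counts are trimmed to match before the queues are split
 * between the tx_scrq and rx_scrq arrays.
 */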
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003202static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003203{
3204 struct device *dev = &adapter->vdev->dev;
3205 struct ibmvnic_sub_crq_queue **allqueues;
3206 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003207 int total_queues;
3208 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05003209 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003210
Thomas Falcon032c5e82015-12-21 11:26:06 -06003211 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3212
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003213 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003214 if (!allqueues)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003215 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003216
3217 for (i = 0; i < total_queues; i++) {
3218 allqueues[i] = init_sub_crq_queue(adapter);
3219 if (!allqueues[i]) {
3220 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3221 break;
3222 }
3223 registered_queues++;
3224 }
3225
3226 /* Make sure we were able to register the minimum number of queues */
3227 if (registered_queues <
3228 adapter->min_tx_queues + adapter->min_rx_queues) {
3229 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3230 goto tx_failed;
3231 }
3232
 3233	/* Distribute the allocation shortfall across the requested queue counts */
3234 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3235 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3236 switch (i % 3) {
3237 case 0:
3238 if (adapter->req_rx_queues > adapter->min_rx_queues)
3239 adapter->req_rx_queues--;
3240 else
3241 more++;
3242 break;
3243 case 1:
3244 if (adapter->req_tx_queues > adapter->min_tx_queues)
3245 adapter->req_tx_queues--;
3246 else
3247 more++;
3248 break;
3249 }
3250 }
3251
3252 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003253 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003254 if (!adapter->tx_scrq)
3255 goto tx_failed;
3256
3257 for (i = 0; i < adapter->req_tx_queues; i++) {
3258 adapter->tx_scrq[i] = allqueues[i];
3259 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003260 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003261 }
3262
3263 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003264 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003265 if (!adapter->rx_scrq)
3266 goto rx_failed;
3267
3268 for (i = 0; i < adapter->req_rx_queues; i++) {
3269 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3270 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003271 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003272 }
3273
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003274 kfree(allqueues);
3275 return 0;
3276
3277rx_failed:
3278 kfree(adapter->tx_scrq);
3279 adapter->tx_scrq = NULL;
3280tx_failed:
3281 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003282 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003283 kfree(allqueues);
3284 return -1;
3285}
3286
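/* Build and send the REQUEST_CAPABILITY CRQs. On the first (non-retry)
 * pass, the requested queue and entry counts are derived from any desired
 * values, clamped so a single long term buffer stays within
 * IBMVNIC_MAX_LTB_SIZE. running_cap_crqs is incremented before each send.
 */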
3287static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3288{
3289 struct device *dev = &adapter->vdev->dev;
3290 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05003291 int max_entries;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003292
3293 if (!retry) {
 3294		/* Sub-CRQ entries are 32 bytes long */
3295 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3296
3297 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3298 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3299 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3300 return;
3301 }
3302
John Allenc26eba02017-10-26 16:23:25 -05003303 if (adapter->desired.mtu)
3304 adapter->req_mtu = adapter->desired.mtu;
3305 else
3306 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003307
John Allenc26eba02017-10-26 16:23:25 -05003308 if (!adapter->desired.tx_entries)
3309 adapter->desired.tx_entries =
3310 adapter->max_tx_entries_per_subcrq;
3311 if (!adapter->desired.rx_entries)
3312 adapter->desired.rx_entries =
3313 adapter->max_rx_add_entries_per_subcrq;
3314
3315 max_entries = IBMVNIC_MAX_LTB_SIZE /
3316 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3317
3318 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3319 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3320 adapter->desired.tx_entries = max_entries;
3321 }
3322
3323 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3324 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3325 adapter->desired.rx_entries = max_entries;
3326 }
3327
3328 if (adapter->desired.tx_entries)
3329 adapter->req_tx_entries_per_subcrq =
3330 adapter->desired.tx_entries;
3331 else
3332 adapter->req_tx_entries_per_subcrq =
3333 adapter->max_tx_entries_per_subcrq;
3334
3335 if (adapter->desired.rx_entries)
3336 adapter->req_rx_add_entries_per_subcrq =
3337 adapter->desired.rx_entries;
3338 else
3339 adapter->req_rx_add_entries_per_subcrq =
3340 adapter->max_rx_add_entries_per_subcrq;
3341
3342 if (adapter->desired.tx_queues)
3343 adapter->req_tx_queues =
3344 adapter->desired.tx_queues;
3345 else
3346 adapter->req_tx_queues =
3347 adapter->opt_tx_comp_sub_queues;
3348
3349 if (adapter->desired.rx_queues)
3350 adapter->req_rx_queues =
3351 adapter->desired.rx_queues;
3352 else
3353 adapter->req_rx_queues =
3354 adapter->opt_rx_comp_queues;
3355
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003356 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003357 }
3358
Thomas Falcon032c5e82015-12-21 11:26:06 -06003359 memset(&crq, 0, sizeof(crq));
3360 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3361 crq.request_capability.cmd = REQUEST_CAPABILITY;
3362
3363 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003364 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003365 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003366 ibmvnic_send_crq(adapter, &crq);
3367
3368 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003369 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003370 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003371 ibmvnic_send_crq(adapter, &crq);
3372
3373 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003374 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003375 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003376 ibmvnic_send_crq(adapter, &crq);
3377
3378 crq.request_capability.capability =
3379 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3380 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003381 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003382 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003383 ibmvnic_send_crq(adapter, &crq);
3384
3385 crq.request_capability.capability =
3386 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3387 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003388 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003389 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003390 ibmvnic_send_crq(adapter, &crq);
3391
3392 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06003393 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06003394 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003395 ibmvnic_send_crq(adapter, &crq);
3396
3397 if (adapter->netdev->flags & IFF_PROMISC) {
3398 if (adapter->promisc_supported) {
3399 crq.request_capability.capability =
3400 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003401 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06003402 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003403 ibmvnic_send_crq(adapter, &crq);
3404 }
3405 } else {
3406 crq.request_capability.capability =
3407 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003408 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06003409 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003410 ibmvnic_send_crq(adapter, &crq);
3411 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003412}
3413
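/* A sub-CRQ slot is valid once IBMVNIC_CRQ_CMD_RSP is set in its first
 * byte: pending_scrq() peeks at the current slot, while ibmvnic_next_scrq()
 * consumes it and advances the cursor under the queue lock.
 */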
3414static int pending_scrq(struct ibmvnic_adapter *adapter,
3415 struct ibmvnic_sub_crq_queue *scrq)
3416{
3417 union sub_crq *entry = &scrq->msgs[scrq->cur];
3418
Thomas Falcon1cf9cc72017-06-14 23:50:08 -05003419 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003420 return 1;
3421 else
3422 return 0;
3423}
3424
3425static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3426 struct ibmvnic_sub_crq_queue *scrq)
3427{
3428 union sub_crq *entry;
3429 unsigned long flags;
3430
3431 spin_lock_irqsave(&scrq->lock, flags);
3432 entry = &scrq->msgs[scrq->cur];
3433 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3434 if (++scrq->cur == scrq->size)
3435 scrq->cur = 0;
3436 } else {
3437 entry = NULL;
3438 }
3439 spin_unlock_irqrestore(&scrq->lock, flags);
3440
3441 return entry;
3442}
3443
3444static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3445{
3446 struct ibmvnic_crq_queue *queue = &adapter->crq;
3447 union ibmvnic_crq *crq;
3448
3449 crq = &queue->msgs[queue->cur];
3450 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3451 if (++queue->cur == queue->size)
3452 queue->cur = 0;
3453 } else {
3454 crq = NULL;
3455 }
3456
3457 return crq;
3458}
3459
Thomas Falcon2d14d372018-07-13 12:03:32 -05003460static void print_subcrq_error(struct device *dev, int rc, const char *func)
3461{
3462 switch (rc) {
3463 case H_PARAMETER:
3464 dev_warn_ratelimited(dev,
3465 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3466 func, rc);
3467 break;
3468 case H_CLOSED:
3469 dev_warn_ratelimited(dev,
3470 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3471 func, rc);
3472 break;
3473 default:
3474 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3475 break;
3476 }
3477}
3478
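/* Hand descriptors to the hypervisor with H_SEND_SUB_CRQ (one descriptor)
 * or H_SEND_SUB_CRQ_INDIRECT (a batch described by ioba/num_entries). A
 * memory barrier precedes each hcall so the hypervisor sees a fully
 * written request; failures go through print_subcrq_error().
 */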
Thomas Falcon032c5e82015-12-21 11:26:06 -06003479static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3480 union sub_crq *sub_crq)
3481{
3482 unsigned int ua = adapter->vdev->unit_address;
3483 struct device *dev = &adapter->vdev->dev;
3484 u64 *u64_crq = (u64 *)sub_crq;
3485 int rc;
3486
3487 netdev_dbg(adapter->netdev,
3488 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3489 (unsigned long int)cpu_to_be64(remote_handle),
3490 (unsigned long int)cpu_to_be64(u64_crq[0]),
3491 (unsigned long int)cpu_to_be64(u64_crq[1]),
3492 (unsigned long int)cpu_to_be64(u64_crq[2]),
3493 (unsigned long int)cpu_to_be64(u64_crq[3]));
3494
3495 /* Make sure the hypervisor sees the complete request */
3496 mb();
3497
3498 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3499 cpu_to_be64(remote_handle),
3500 cpu_to_be64(u64_crq[0]),
3501 cpu_to_be64(u64_crq[1]),
3502 cpu_to_be64(u64_crq[2]),
3503 cpu_to_be64(u64_crq[3]));
3504
Thomas Falcon2d14d372018-07-13 12:03:32 -05003505 if (rc)
3506 print_subcrq_error(dev, rc, __func__);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003507
3508 return rc;
3509}
3510
Thomas Falconad7775d2016-04-01 17:20:34 -05003511static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3512 u64 remote_handle, u64 ioba, u64 num_entries)
3513{
3514 unsigned int ua = adapter->vdev->unit_address;
3515 struct device *dev = &adapter->vdev->dev;
3516 int rc;
3517
3518 /* Make sure the hypervisor sees the complete request */
3519 mb();
3520 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3521 cpu_to_be64(remote_handle),
3522 ioba, num_entries);
3523
Thomas Falcon2d14d372018-07-13 12:03:32 -05003524 if (rc)
3525 print_subcrq_error(dev, rc, __func__);
Thomas Falconad7775d2016-04-01 17:20:34 -05003526
3527 return rc;
3528}
3529
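/* Post a single command on the main CRQ with H_SEND_CRQ. Apart from the
 * INIT command, sends are refused while the CRQ is marked inactive, since
 * the device may have changed state during a reset or failover.
 */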
Thomas Falcon032c5e82015-12-21 11:26:06 -06003530static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3531 union ibmvnic_crq *crq)
3532{
3533 unsigned int ua = adapter->vdev->unit_address;
3534 struct device *dev = &adapter->vdev->dev;
3535 u64 *u64_crq = (u64 *)crq;
3536 int rc;
3537
3538 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3539 (unsigned long int)cpu_to_be64(u64_crq[0]),
3540 (unsigned long int)cpu_to_be64(u64_crq[1]));
3541
Thomas Falcon51536982018-05-23 13:37:56 -05003542 if (!adapter->crq.active &&
3543 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3544 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3545 return -EINVAL;
3546 }
3547
Thomas Falcon032c5e82015-12-21 11:26:06 -06003548 /* Make sure the hypervisor sees the complete request */
3549 mb();
3550
3551 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3552 cpu_to_be64(u64_crq[0]),
3553 cpu_to_be64(u64_crq[1]));
3554
3555 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003556 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003557 dev_warn(dev, "CRQ Queue closed\n");
Lijun Panfa68bfa2020-08-19 17:52:24 -05003558			/* do not reset, report the failure, wait for passive init from server */
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003559 }
3560
Thomas Falcon032c5e82015-12-21 11:26:06 -06003561 dev_warn(dev, "Send error (rc=%d)\n", rc);
3562 }
3563
3564 return rc;
3565}
3566
3567static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3568{
Thomas Falcon36a782f2020-08-31 11:59:57 -05003569 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003570 union ibmvnic_crq crq;
Thomas Falcon36a782f2020-08-31 11:59:57 -05003571 int retries = 100;
3572 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003573
3574 memset(&crq, 0, sizeof(crq));
3575 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3576 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3577 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3578
Thomas Falcon36a782f2020-08-31 11:59:57 -05003579 do {
3580 rc = ibmvnic_send_crq(adapter, &crq);
3581 if (rc != H_CLOSED)
3582 break;
3583 retries--;
3584 msleep(50);
3585
3586 } while (retries > 0);
3587
3588 if (rc) {
3589 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3590 return rc;
3591 }
3592
3593 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003594}
3595
Thomas Falcon032c5e82015-12-21 11:26:06 -06003596static int send_version_xchg(struct ibmvnic_adapter *adapter)
3597{
3598 union ibmvnic_crq crq;
3599
3600 memset(&crq, 0, sizeof(crq));
3601 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3602 crq.version_exchange.cmd = VERSION_EXCHANGE;
3603 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3604
3605 return ibmvnic_send_crq(adapter, &crq);
3606}
3607
Nathan Fontenot37798d02017-11-08 11:23:56 -06003608struct vnic_login_client_data {
3609 u8 type;
3610 __be16 len;
Kees Cook08ea5562018-04-10 15:26:43 -07003611 char name[];
Nathan Fontenot37798d02017-11-08 11:23:56 -06003612} __packed;
3613
3614static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3615{
3616 int len;
3617
3618 /* Calculate the amount of buffer space needed for the
3619 * vnic client data in the login buffer. There are four entries,
3620 * OS name, LPAR name, device name, and a null last entry.
3621 */
3622 len = 4 * sizeof(struct vnic_login_client_data);
3623 len += 6; /* "Linux" plus NULL */
3624 len += strlen(utsname()->nodename) + 1;
3625 len += strlen(adapter->netdev->name) + 1;
3626
3627 return len;
3628}
3629
3630static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3631 struct vnic_login_client_data *vlcd)
3632{
3633 const char *os_name = "Linux";
3634 int len;
3635
3636 /* Type 1 - LPAR OS */
3637 vlcd->type = 1;
3638 len = strlen(os_name) + 1;
3639 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003640 strncpy(vlcd->name, os_name, len);
3641 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003642
3643 /* Type 2 - LPAR name */
3644 vlcd->type = 2;
3645 len = strlen(utsname()->nodename) + 1;
3646 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003647 strncpy(vlcd->name, utsname()->nodename, len);
3648 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003649
3650 /* Type 3 - device name */
3651 vlcd->type = 3;
3652 len = strlen(adapter->netdev->name) + 1;
3653 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003654 strncpy(vlcd->name, adapter->netdev->name, len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003655}
3656
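/* Build and send the LOGIN request. The login buffer carries the number
 * and CRQ handles of the tx/rx sub-CRQs plus the client data (OS, LPAR
 * and device names) and points at a separately mapped buffer for the
 * server's response.
 */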
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003657static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003658{
3659 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3660 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003661 struct device *dev = &adapter->vdev->dev;
3662 dma_addr_t rsp_buffer_token;
3663 dma_addr_t buffer_token;
3664 size_t rsp_buffer_size;
3665 union ibmvnic_crq crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003666 size_t buffer_size;
3667 __be64 *tx_list_p;
3668 __be64 *rx_list_p;
Nathan Fontenot37798d02017-11-08 11:23:56 -06003669 int client_data_len;
3670 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003671 int i;
3672
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003673 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3674 netdev_err(adapter->netdev,
3675 "RX or TX queues are not allocated, device login failed\n");
3676 return -1;
3677 }
3678
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06003679 release_login_rsp_buffer(adapter);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003680 client_data_len = vnic_client_data_len(adapter);
3681
Thomas Falcon032c5e82015-12-21 11:26:06 -06003682 buffer_size =
3683 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06003684 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3685 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003686
Nathan Fontenot37798d02017-11-08 11:23:56 -06003687 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003688 if (!login_buffer)
3689 goto buf_alloc_failed;
3690
3691 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3692 DMA_TO_DEVICE);
3693 if (dma_mapping_error(dev, buffer_token)) {
3694 dev_err(dev, "Couldn't map login buffer\n");
3695 goto buf_map_failed;
3696 }
3697
John Allen498cd8e2016-04-06 11:49:55 -05003698 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3699 sizeof(u64) * adapter->req_tx_queues +
3700 sizeof(u64) * adapter->req_rx_queues +
3701 sizeof(u64) * adapter->req_rx_queues +
3702 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003703
3704 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3705 if (!login_rsp_buffer)
3706 goto buf_rsp_alloc_failed;
3707
3708 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3709 rsp_buffer_size, DMA_FROM_DEVICE);
3710 if (dma_mapping_error(dev, rsp_buffer_token)) {
3711 dev_err(dev, "Couldn't map login rsp buffer\n");
3712 goto buf_rsp_map_failed;
3713 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04003714
Thomas Falcon032c5e82015-12-21 11:26:06 -06003715 adapter->login_buf = login_buffer;
3716 adapter->login_buf_token = buffer_token;
3717 adapter->login_buf_sz = buffer_size;
3718 adapter->login_rsp_buf = login_rsp_buffer;
3719 adapter->login_rsp_buf_token = rsp_buffer_token;
3720 adapter->login_rsp_buf_sz = rsp_buffer_size;
3721
3722 login_buffer->len = cpu_to_be32(buffer_size);
3723 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3724 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3725 login_buffer->off_txcomp_subcrqs =
3726 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3727 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3728 login_buffer->off_rxcomp_subcrqs =
3729 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3730 sizeof(u64) * adapter->req_tx_queues);
3731 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3732 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3733
3734 tx_list_p = (__be64 *)((char *)login_buffer +
3735 sizeof(struct ibmvnic_login_buffer));
3736 rx_list_p = (__be64 *)((char *)login_buffer +
3737 sizeof(struct ibmvnic_login_buffer) +
3738 sizeof(u64) * adapter->req_tx_queues);
3739
3740 for (i = 0; i < adapter->req_tx_queues; i++) {
3741 if (adapter->tx_scrq[i]) {
3742 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3743 crq_num);
3744 }
3745 }
3746
3747 for (i = 0; i < adapter->req_rx_queues; i++) {
3748 if (adapter->rx_scrq[i]) {
3749 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3750 crq_num);
3751 }
3752 }
3753
Nathan Fontenot37798d02017-11-08 11:23:56 -06003754 /* Insert vNIC login client data */
3755 vlcd = (struct vnic_login_client_data *)
3756 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3757 login_buffer->client_data_offset =
3758 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3759 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3760
3761 vnic_add_client_data(adapter, vlcd);
3762
Thomas Falcon032c5e82015-12-21 11:26:06 -06003763 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3764 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3765 netdev_dbg(adapter->netdev, "%016lx\n",
3766 ((unsigned long int *)(adapter->login_buf))[i]);
3767 }
3768
3769 memset(&crq, 0, sizeof(crq));
3770 crq.login.first = IBMVNIC_CRQ_CMD;
3771 crq.login.cmd = LOGIN;
3772 crq.login.ioba = cpu_to_be32(buffer_token);
3773 crq.login.len = cpu_to_be32(buffer_size);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003774 ibmvnic_send_crq(adapter, &crq);
3775
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003776 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003777
Thomas Falcon032c5e82015-12-21 11:26:06 -06003778buf_rsp_map_failed:
3779 kfree(login_rsp_buffer);
3780buf_rsp_alloc_failed:
3781 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3782buf_map_failed:
3783 kfree(login_buffer);
3784buf_alloc_failed:
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003785 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003786}
3787
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003788static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3789 u32 len, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003790{
3791 union ibmvnic_crq crq;
3792
3793 memset(&crq, 0, sizeof(crq));
3794 crq.request_map.first = IBMVNIC_CRQ_CMD;
3795 crq.request_map.cmd = REQUEST_MAP;
3796 crq.request_map.map_id = map_id;
3797 crq.request_map.ioba = cpu_to_be32(addr);
3798 crq.request_map.len = cpu_to_be32(len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003799 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003800}
3801
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003802static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003803{
3804 union ibmvnic_crq crq;
3805
3806 memset(&crq, 0, sizeof(crq));
3807 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3808 crq.request_unmap.cmd = REQUEST_UNMAP;
3809 crq.request_unmap.map_id = map_id;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003810 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003811}
3812
3813static void send_map_query(struct ibmvnic_adapter *adapter)
3814{
3815 union ibmvnic_crq crq;
3816
3817 memset(&crq, 0, sizeof(crq));
3818 crq.query_map.first = IBMVNIC_CRQ_CMD;
3819 crq.query_map.cmd = QUERY_MAP;
3820 ibmvnic_send_crq(adapter, &crq);
3821}
3822
3823/* Send a series of CRQs requesting various capabilities of the VNIC server */
3824static void send_cap_queries(struct ibmvnic_adapter *adapter)
3825{
3826 union ibmvnic_crq crq;
3827
Thomas Falcon901e0402017-02-15 12:17:59 -06003828 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003829 memset(&crq, 0, sizeof(crq));
3830 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3831 crq.query_capability.cmd = QUERY_CAPABILITY;
3832
3833 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003834 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003835 ibmvnic_send_crq(adapter, &crq);
3836
3837 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003838 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003839 ibmvnic_send_crq(adapter, &crq);
3840
3841 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003842 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003843 ibmvnic_send_crq(adapter, &crq);
3844
3845 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003846 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003847 ibmvnic_send_crq(adapter, &crq);
3848
3849 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003850 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003851 ibmvnic_send_crq(adapter, &crq);
3852
3853 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003854 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003855 ibmvnic_send_crq(adapter, &crq);
3856
3857 crq.query_capability.capability =
3858 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003859 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003860 ibmvnic_send_crq(adapter, &crq);
3861
3862 crq.query_capability.capability =
3863 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003864 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003865 ibmvnic_send_crq(adapter, &crq);
3866
3867 crq.query_capability.capability =
3868 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003869 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003870 ibmvnic_send_crq(adapter, &crq);
3871
3872 crq.query_capability.capability =
3873 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003874 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003875 ibmvnic_send_crq(adapter, &crq);
3876
3877 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06003878 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003879 ibmvnic_send_crq(adapter, &crq);
3880
3881 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003882 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003883 ibmvnic_send_crq(adapter, &crq);
3884
3885 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003886 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003887 ibmvnic_send_crq(adapter, &crq);
3888
3889 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003890 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003891 ibmvnic_send_crq(adapter, &crq);
3892
3893 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06003894 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003895 ibmvnic_send_crq(adapter, &crq);
3896
3897 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06003898 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003899 ibmvnic_send_crq(adapter, &crq);
3900
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04003901 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3902 atomic_inc(&adapter->running_cap_crqs);
3903 ibmvnic_send_crq(adapter, &crq);
3904
Thomas Falcon032c5e82015-12-21 11:26:06 -06003905 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003906 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003907 ibmvnic_send_crq(adapter, &crq);
3908
3909 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003910 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003911 ibmvnic_send_crq(adapter, &crq);
3912
3913 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003914 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003915 ibmvnic_send_crq(adapter, &crq);
3916
3917 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003918 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003919 ibmvnic_send_crq(adapter, &crq);
3920
3921 crq.query_capability.capability =
3922 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06003923 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003924 ibmvnic_send_crq(adapter, &crq);
3925
3926 crq.query_capability.capability =
3927 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003928 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003929 ibmvnic_send_crq(adapter, &crq);
3930
3931 crq.query_capability.capability =
3932 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003933 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003934 ibmvnic_send_crq(adapter, &crq);
3935
3936 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003937 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003938 ibmvnic_send_crq(adapter, &crq);
3939}
3940
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003941static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3942 struct ibmvnic_adapter *adapter)
3943{
3944 struct device *dev = &adapter->vdev->dev;
3945
3946 if (crq->get_vpd_size_rsp.rc.code) {
3947 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3948 crq->get_vpd_size_rsp.rc.code);
3949 complete(&adapter->fw_done);
3950 return;
3951 }
3952
3953 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3954 complete(&adapter->fw_done);
3955}
3956
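/* Parse the VPD response: find the "RM" keyword in the returned buffer and
 * copy the firmware level string that follows into adapter->fw_version,
 * falling back to "N/A" when no level is provided.
 */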
3957static void handle_vpd_rsp(union ibmvnic_crq *crq,
3958 struct ibmvnic_adapter *adapter)
3959{
3960 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02003961 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003962 u8 fw_level_len = 0;
3963
3964 memset(adapter->fw_version, 0, 32);
3965
3966 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3967 DMA_FROM_DEVICE);
3968
3969 if (crq->get_vpd_rsp.rc.code) {
3970 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3971 crq->get_vpd_rsp.rc.code);
3972 goto complete;
3973 }
3974
3975 /* get the position of the firmware version info
3976 * located after the ASCII 'RM' substring in the buffer
3977 */
3978 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3979 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02003980 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003981 goto complete;
3982 }
3983
3984 /* get length of firmware level ASCII substring */
3985 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3986 fw_level_len = *(substr + 2);
3987 } else {
 3988		dev_info(dev, "FW level length byte is past the end of the VPD buffer\n");
3989 goto complete;
3990 }
3991
3992 /* copy firmware version string from vpd into adapter */
3993 if ((substr + 3 + fw_level_len) <
3994 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02003995 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003996 } else {
 3997		dev_info(dev, "FW level string runs past the end of the VPD buffer\n");
3998 }
3999
4000complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004001 if (adapter->fw_version[0] == '\0')
4002 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004003 complete(&adapter->fw_done);
4004}
4005
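/* QUERY_IP_OFFLOAD response: dump the offload capabilities reported by the
 * server, translate them into netdev hw_features (checksum and TSO; large_rx
 * stays disabled), and send CONTROL_IP_OFFLOAD to enable the chosen set.
 */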
Thomas Falcon032c5e82015-12-21 11:26:06 -06004006static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4007{
4008 struct device *dev = &adapter->vdev->dev;
4009 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcondde746a2019-04-10 11:07:00 -05004010 netdev_features_t old_hw_features = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004011 union ibmvnic_crq crq;
4012 int i;
4013
4014 dma_unmap_single(dev, adapter->ip_offload_tok,
4015 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4016
4017 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4018 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4019 netdev_dbg(adapter->netdev, "%016lx\n",
4020 ((unsigned long int *)(buf))[i]);
4021
4022 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4023 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4024 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4025 buf->tcp_ipv4_chksum);
4026 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4027 buf->tcp_ipv6_chksum);
4028 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4029 buf->udp_ipv4_chksum);
4030 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4031 buf->udp_ipv6_chksum);
4032 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4033 buf->large_tx_ipv4);
4034 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4035 buf->large_tx_ipv6);
4036 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4037 buf->large_rx_ipv4);
4038 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4039 buf->large_rx_ipv6);
4040 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4041 buf->max_ipv4_header_size);
4042 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4043 buf->max_ipv6_header_size);
4044 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4045 buf->max_tcp_header_size);
4046 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4047 buf->max_udp_header_size);
4048 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4049 buf->max_large_tx_size);
4050 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4051 buf->max_large_rx_size);
4052 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4053 buf->ipv6_extension_header);
4054 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4055 buf->tcp_pseudosum_req);
4056 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4057 buf->num_ipv6_ext_headers);
4058 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4059 buf->off_ipv6_ext_headers);
4060
4061 adapter->ip_offload_ctrl_tok =
4062 dma_map_single(dev, &adapter->ip_offload_ctrl,
4063 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
4064
4065 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4066 dev_err(dev, "Couldn't map ip offload control buffer\n");
4067 return;
4068 }
4069
Thomas Falconf6897942018-01-18 19:05:01 -06004070 adapter->ip_offload_ctrl.len =
4071 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004072 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
Thomas Falconf6897942018-01-18 19:05:01 -06004073 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
4074 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004075 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4076 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
4077 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4078 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
Thomas Falconfdb06102017-10-17 12:36:55 -05004079 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
4080 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004081
Thomas Falconfdb06102017-10-17 12:36:55 -05004082 /* large_rx disabled for now, additional features needed */
Thomas Falcon032c5e82015-12-21 11:26:06 -06004083 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
4084 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
4085
Thomas Falcondde746a2019-04-10 11:07:00 -05004086 if (adapter->state != VNIC_PROBING) {
4087 old_hw_features = adapter->netdev->hw_features;
4088 adapter->netdev->hw_features = 0;
4089 }
4090
Thomas Falconb66b7bd2019-04-10 11:06:59 -05004091 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004092
4093 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
Thomas Falcondde746a2019-04-10 11:07:00 -05004094 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004095
4096 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
Thomas Falcondde746a2019-04-10 11:07:00 -05004097 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004098
Thomas Falcon9be02cd2016-04-01 17:20:35 -05004099 if ((adapter->netdev->features &
4100 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
Thomas Falcondde746a2019-04-10 11:07:00 -05004101 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
Thomas Falcon9be02cd2016-04-01 17:20:35 -05004102
Thomas Falconfdb06102017-10-17 12:36:55 -05004103 if (buf->large_tx_ipv4)
Thomas Falcondde746a2019-04-10 11:07:00 -05004104 adapter->netdev->hw_features |= NETIF_F_TSO;
Thomas Falconfdb06102017-10-17 12:36:55 -05004105 if (buf->large_tx_ipv6)
Thomas Falcondde746a2019-04-10 11:07:00 -05004106 adapter->netdev->hw_features |= NETIF_F_TSO6;
Thomas Falconfdb06102017-10-17 12:36:55 -05004107
Thomas Falcondde746a2019-04-10 11:07:00 -05004108 if (adapter->state == VNIC_PROBING) {
4109 adapter->netdev->features |= adapter->netdev->hw_features;
4110 } else if (old_hw_features != adapter->netdev->hw_features) {
4111 netdev_features_t tmp = 0;
4112
4113 /* disable features no longer supported */
4114 adapter->netdev->features &= adapter->netdev->hw_features;
4115 /* turn on features now supported if previously enabled */
4116 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4117 adapter->netdev->hw_features;
4118 adapter->netdev->features |=
4119 tmp & adapter->netdev->wanted_features;
4120 }
Thomas Falconaa0bf852017-10-17 12:36:56 -05004121
Thomas Falcon032c5e82015-12-21 11:26:06 -06004122 memset(&crq, 0, sizeof(crq));
4123 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4124 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4125 crq.control_ip_offload.len =
4126 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4127 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4128 ibmvnic_send_crq(adapter, &crq);
4129}
4130
Thomas Falconc9008d32018-08-06 21:39:59 -05004131static const char *ibmvnic_fw_err_cause(u16 cause)
4132{
4133 switch (cause) {
4134 case ADAPTER_PROBLEM:
4135 return "adapter problem";
4136 case BUS_PROBLEM:
4137 return "bus problem";
4138 case FW_PROBLEM:
4139 return "firmware problem";
4140 case DD_PROBLEM:
4141 return "device driver problem";
4142 case EEH_RECOVERY:
4143 return "EEH recovery";
4144 case FW_UPDATED:
4145 return "firmware updated";
4146 case LOW_MEMORY:
	4147		return "low memory";
4148 default:
4149 return "unknown";
4150 }
4151}
4152
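/* A firmware ERROR_INDICATION is logged with its decoded cause and always
 * triggers a reset: fatal errors schedule VNIC_RESET_FATAL, everything
 * else VNIC_RESET_NON_FATAL.
 */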
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004153static void handle_error_indication(union ibmvnic_crq *crq,
4154 struct ibmvnic_adapter *adapter)
4155{
4156 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004157 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004158
Thomas Falconc9008d32018-08-06 21:39:59 -05004159 cause = be16_to_cpu(crq->error_indication.error_cause);
4160
4161 dev_warn_ratelimited(dev,
4162 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4163 crq->error_indication.flags
4164 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4165 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004166
Nathan Fontenoted651a12017-05-03 14:04:38 -04004167 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4168 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004169 else
4170 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004171}
4172
Thomas Falconf8136142018-01-29 13:45:05 -06004173static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4174 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004175{
4176 struct net_device *netdev = adapter->netdev;
4177 struct device *dev = &adapter->vdev->dev;
4178 long rc;
4179
4180 rc = crq->change_mac_addr_rsp.rc.code;
4181 if (rc) {
4182 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004183 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004184 }
Thomas Falcon62740e92019-05-09 23:13:43 -05004185 ether_addr_copy(netdev->dev_addr,
4186 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004187out:
4188 complete(&adapter->fw_done);
4189 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004190}
4191
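/* REQUEST_CAPABILITY response: on PARTIALSUCCESS adopt the value granted
 * by the server (or fall back to the previous MTU) and resend the request;
 * once all outstanding capability CRQs have completed, map the IP offload
 * buffer and query offload support.
 */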
4192static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4193 struct ibmvnic_adapter *adapter)
4194{
4195 struct device *dev = &adapter->vdev->dev;
4196 u64 *req_value;
4197 char *name;
4198
Thomas Falcon901e0402017-02-15 12:17:59 -06004199 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004200 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4201 case REQ_TX_QUEUES:
4202 req_value = &adapter->req_tx_queues;
4203 name = "tx";
4204 break;
4205 case REQ_RX_QUEUES:
4206 req_value = &adapter->req_rx_queues;
4207 name = "rx";
4208 break;
4209 case REQ_RX_ADD_QUEUES:
4210 req_value = &adapter->req_rx_add_queues;
4211 name = "rx_add";
4212 break;
4213 case REQ_TX_ENTRIES_PER_SUBCRQ:
4214 req_value = &adapter->req_tx_entries_per_subcrq;
4215 name = "tx_entries_per_subcrq";
4216 break;
4217 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4218 req_value = &adapter->req_rx_add_entries_per_subcrq;
4219 name = "rx_add_entries_per_subcrq";
4220 break;
4221 case REQ_MTU:
4222 req_value = &adapter->req_mtu;
4223 name = "mtu";
4224 break;
4225 case PROMISC_REQUESTED:
4226 req_value = &adapter->promisc;
4227 name = "promisc";
4228 break;
4229 default:
4230 dev_err(dev, "Got invalid cap request rsp %d\n",
4231 crq->request_capability.capability);
4232 return;
4233 }
4234
4235 switch (crq->request_capability_rsp.rc.code) {
4236 case SUCCESS:
4237 break;
4238 case PARTIALSUCCESS:
4239 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4240 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06004241 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06004242 number), name);
John Allene7913802018-01-18 16:27:12 -06004243
4244 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4245 REQ_MTU) {
4246 pr_err("mtu of %llu is not supported. Reverting.\n",
4247 *req_value);
4248 *req_value = adapter->fallback.mtu;
4249 } else {
4250 *req_value =
4251 be64_to_cpu(crq->request_capability_rsp.number);
4252 }
4253
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04004254 ibmvnic_send_req_caps(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004255 return;
4256 default:
4257 dev_err(dev, "Error %d in request cap rsp\n",
4258 crq->request_capability_rsp.rc.code);
4259 return;
4260 }
4261
4262 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004263 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06004264 union ibmvnic_crq newcrq;
4265 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4266 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4267 &adapter->ip_offload_buf;
4268
Thomas Falcon249168a2017-02-15 12:18:00 -06004269 adapter->wait_capability = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004270 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4271 buf_sz,
4272 DMA_FROM_DEVICE);
4273
4274 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4275 if (!firmware_has_feature(FW_FEATURE_CMO))
4276 dev_err(dev, "Couldn't map offload buffer\n");
4277 return;
4278 }
4279
4280 memset(&newcrq, 0, sizeof(newcrq));
4281 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4282 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4283 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4284 newcrq.query_ip_offload.ioba =
4285 cpu_to_be32(adapter->ip_offload_tok);
4286
4287 ibmvnic_send_crq(adapter, &newcrq);
4288 }
4289}
4290
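/* Login response: sanity-check it against the login request, record the
 * negotiated rx buffer size and the tx/rx sub-CRQ handles, then release
 * the login buffers and complete init_done.
 */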
4291static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4292 struct ibmvnic_adapter *adapter)
4293{
4294 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004295 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004296 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4297 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004298 u64 *tx_handle_array;
4299 u64 *rx_handle_array;
4300 int num_tx_pools;
4301 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004302 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004303 int i;
4304
4305 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004306 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004307 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004308 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004309
John Allen498cd8e2016-04-06 11:49:55 -05004310 /* If the number of queues requested can't be allocated by the
4311 * server, the login response will return with code 1. We will need
4312 * to resend the login buffer with fewer queues requested.
4313 */
4314 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004315 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004316 complete(&adapter->init_done);
4317 return 0;
4318 }
4319
John Allenc26eba02017-10-26 16:23:25 -05004320 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4321
Thomas Falcon032c5e82015-12-21 11:26:06 -06004322 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4323 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4324 netdev_dbg(adapter->netdev, "%016lx\n",
4325 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4326 }
4327
4328 /* Sanity checks */
4329 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4330 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4331 adapter->req_rx_add_queues !=
4332 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4333 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4334 ibmvnic_remove(adapter->vdev);
4335 return -EIO;
4336 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004337 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4338 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4339 /* variable buffer sizes are not supported, so just read the
4340 * first entry.
4341 */
4342 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004343
4344 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4345 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4346
4347 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4348 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4349 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4350 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4351
4352 for (i = 0; i < num_tx_pools; i++)
4353 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4354
4355 for (i = 0; i < num_rx_pools; i++)
4356 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4357
Thomas Falcon507ebe62020-08-21 13:39:01 -05004358 adapter->num_active_tx_scrqs = num_tx_pools;
4359 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004360 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004361 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004362 complete(&adapter->init_done);
4363
Thomas Falcon032c5e82015-12-21 11:26:06 -06004364 return 0;
4365}
4366
Thomas Falcon032c5e82015-12-21 11:26:06 -06004367static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4368 struct ibmvnic_adapter *adapter)
4369{
4370 struct device *dev = &adapter->vdev->dev;
4371 long rc;
4372
4373 rc = crq->request_unmap_rsp.rc.code;
4374 if (rc)
4375 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4376}
4377
4378static void handle_query_map_rsp(union ibmvnic_crq *crq,
4379 struct ibmvnic_adapter *adapter)
4380{
4381 struct net_device *netdev = adapter->netdev;
4382 struct device *dev = &adapter->vdev->dev;
4383 long rc;
4384
4385 rc = crq->query_map_rsp.rc.code;
4386 if (rc) {
4387 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4388 return;
4389 }
4390 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4391 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4392 crq->query_map_rsp.free_pages);
4393}
4394
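/* QUERY_CAPABILITY response: store the reported value in the adapter and,
 * once the last outstanding query has been answered, send the capability
 * requests derived from it.
 */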
4395static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4396 struct ibmvnic_adapter *adapter)
4397{
4398 struct net_device *netdev = adapter->netdev;
4399 struct device *dev = &adapter->vdev->dev;
4400 long rc;
4401
Thomas Falcon901e0402017-02-15 12:17:59 -06004402 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004403 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004404 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004405 rc = crq->query_capability.rc.code;
4406 if (rc) {
4407 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4408 goto out;
4409 }
4410
4411 switch (be16_to_cpu(crq->query_capability.capability)) {
4412 case MIN_TX_QUEUES:
4413 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004414 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004415 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4416 adapter->min_tx_queues);
4417 break;
4418 case MIN_RX_QUEUES:
4419 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004420 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004421 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4422 adapter->min_rx_queues);
4423 break;
4424 case MIN_RX_ADD_QUEUES:
4425 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004426 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004427 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4428 adapter->min_rx_add_queues);
4429 break;
4430 case MAX_TX_QUEUES:
4431 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004432 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004433 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4434 adapter->max_tx_queues);
4435 break;
4436 case MAX_RX_QUEUES:
4437 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004438 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004439 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4440 adapter->max_rx_queues);
4441 break;
4442 case MAX_RX_ADD_QUEUES:
4443 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004444 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004445 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4446 adapter->max_rx_add_queues);
4447 break;
4448 case MIN_TX_ENTRIES_PER_SUBCRQ:
4449 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004450 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004451 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4452 adapter->min_tx_entries_per_subcrq);
4453 break;
4454 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4455 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004456 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004457 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4458 adapter->min_rx_add_entries_per_subcrq);
4459 break;
4460 case MAX_TX_ENTRIES_PER_SUBCRQ:
4461 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004462 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004463 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4464 adapter->max_tx_entries_per_subcrq);
4465 break;
4466 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4467 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004468 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004469 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4470 adapter->max_rx_add_entries_per_subcrq);
4471 break;
4472 case TCP_IP_OFFLOAD:
4473 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06004474 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004475 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4476 adapter->tcp_ip_offload);
4477 break;
4478 case PROMISC_SUPPORTED:
4479 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004480 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004481 netdev_dbg(netdev, "promisc_supported = %lld\n",
4482 adapter->promisc_supported);
4483 break;
4484 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004485 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004486 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004487 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4488 break;
4489 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004490 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004491 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004492 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4493 break;
4494 case MAX_MULTICAST_FILTERS:
4495 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004496 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004497 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4498 adapter->max_multicast_filters);
4499 break;
4500 case VLAN_HEADER_INSERTION:
4501 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004502 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004503 if (adapter->vlan_header_insertion)
4504 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4505 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4506 adapter->vlan_header_insertion);
4507 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004508 case RX_VLAN_HEADER_INSERTION:
4509 adapter->rx_vlan_header_insertion =
4510 be64_to_cpu(crq->query_capability.number);
4511 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4512 adapter->rx_vlan_header_insertion);
4513 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004514 case MAX_TX_SG_ENTRIES:
4515 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004516 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004517 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4518 adapter->max_tx_sg_entries);
4519 break;
4520 case RX_SG_SUPPORTED:
4521 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004522 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004523 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4524 adapter->rx_sg_supported);
4525 break;
4526 case OPT_TX_COMP_SUB_QUEUES:
4527 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004528 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004529 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4530 adapter->opt_tx_comp_sub_queues);
4531 break;
4532 case OPT_RX_COMP_QUEUES:
4533 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004534 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004535 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4536 adapter->opt_rx_comp_queues);
4537 break;
4538 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4539 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004540 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004541 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4542 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4543 break;
4544 case OPT_TX_ENTRIES_PER_SUBCRQ:
4545 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004546 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004547 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4548 adapter->opt_tx_entries_per_subcrq);
4549 break;
4550 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4551 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004552 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004553 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4554 adapter->opt_rxba_entries_per_subcrq);
4555 break;
4556 case TX_RX_DESC_REQ:
4557 adapter->tx_rx_desc_req = crq->query_capability.number;
4558 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4559 adapter->tx_rx_desc_req);
4560 break;
4561
4562 default:
4563 netdev_err(netdev, "Got invalid cap rsp %d\n",
4564 crq->query_capability.capability);
4565 }
4566
4567out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004568 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4569 adapter->wait_capability = false;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04004570 ibmvnic_send_req_caps(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004571 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004572}
4573
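/* Issue QUERY_PHYS_PARMS under fw_lock and wait up to 10 seconds for the
 * response; the handler below fills in adapter->speed and adapter->duplex.
 */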
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004574static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4575{
4576 union ibmvnic_crq crq;
4577 int rc;
4578
4579 memset(&crq, 0, sizeof(crq));
4580 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4581 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004582
4583 mutex_lock(&adapter->fw_lock);
4584 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06004585 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004586
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004587 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004588 if (rc) {
4589 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004590 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004591 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004592
4593 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004594 if (rc) {
4595 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06004596 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004597 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004598
Thomas Falconff25dcb2019-11-25 17:12:56 -06004599 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004600 return adapter->fw_done_rc ? -EIO : 0;
4601}
4602
4603static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4604 struct ibmvnic_adapter *adapter)
4605{
4606 struct net_device *netdev = adapter->netdev;
4607 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004608 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004609
4610 rc = crq->query_phys_parms_rsp.rc.code;
4611 if (rc) {
4612 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4613 return rc;
4614 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004615 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004616 case IBMVNIC_10MBPS:
4617 adapter->speed = SPEED_10;
4618 break;
4619 case IBMVNIC_100MBPS:
4620 adapter->speed = SPEED_100;
4621 break;
4622 case IBMVNIC_1GBPS:
4623 adapter->speed = SPEED_1000;
4624 break;
4625 case IBMVNIC_10GBP:
4626 adapter->speed = SPEED_10000;
4627 break;
4628 case IBMVNIC_25GBPS:
4629 adapter->speed = SPEED_25000;
4630 break;
4631 case IBMVNIC_40GBPS:
4632 adapter->speed = SPEED_40000;
4633 break;
4634 case IBMVNIC_50GBPS:
4635 adapter->speed = SPEED_50000;
4636 break;
4637 case IBMVNIC_100GBPS:
4638 adapter->speed = SPEED_100000;
4639 break;
4640 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004641 if (netif_carrier_ok(netdev))
4642 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004643 adapter->speed = SPEED_UNKNOWN;
4644 }
4645 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4646 adapter->duplex = DUPLEX_FULL;
4647 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4648 adapter->duplex = DUPLEX_HALF;
4649 else
4650 adapter->duplex = DUPLEX_UNKNOWN;
4651
4652 return rc;
4653}
4654
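/* Top-level CRQ dispatcher: transport events (partner init, migration,
 * failover, adapter failure) are handled here directly, while command
 * responses are routed to their individual handlers.
 */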
Thomas Falcon032c5e82015-12-21 11:26:06 -06004655static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4656 struct ibmvnic_adapter *adapter)
4657{
4658 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4659 struct net_device *netdev = adapter->netdev;
4660 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004661 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004662 long rc;
4663
4664 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004665 (unsigned long int)cpu_to_be64(u64_crq[0]),
4666 (unsigned long int)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004667 switch (gen_crq->first) {
4668 case IBMVNIC_CRQ_INIT_RSP:
4669 switch (gen_crq->cmd) {
4670 case IBMVNIC_CRQ_INIT:
4671 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04004672 adapter->from_passive_init = true;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004673 adapter->failover_pending = false;
Thomas Falcon17c87052018-05-23 13:37:58 -05004674 if (!completion_done(&adapter->init_done)) {
4675 complete(&adapter->init_done);
4676 adapter->init_done_rc = -EIO;
4677 }
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004678 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004679 break;
4680 case IBMVNIC_CRQ_INIT_COMPLETE:
4681 dev_info(dev, "Partner initialization complete\n");
Thomas Falcon51536982018-05-23 13:37:56 -05004682 adapter->crq.active = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004683 send_version_xchg(adapter);
4684 break;
4685 default:
4686 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4687 }
4688 return;
4689 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04004690 netif_carrier_off(netdev);
Thomas Falcon51536982018-05-23 13:37:56 -05004691 adapter->crq.active = false;
Thomas Falcon2147e3d2019-11-25 17:12:54 -06004692 /* terminate any thread waiting for a response
4693 * from the device
4694 */
4695 if (!completion_done(&adapter->fw_done)) {
4696 adapter->fw_done_rc = -EIO;
4697 complete(&adapter->fw_done);
4698 }
4699 if (!completion_done(&adapter->stats_done))
4700 complete(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04004701 if (test_bit(0, &adapter->resetting))
Thomas Falcon2770a792018-05-23 13:38:02 -05004702 adapter->force_reset_recovery = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004703 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04004704 dev_info(dev, "Migrated, re-enabling adapter\n");
4705 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05004706 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4707 dev_info(dev, "Backing device failover detected\n");
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004708 adapter->failover_pending = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004709 } else {
4710 /* The adapter lost the connection */
4711 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4712 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04004713 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004714 }
4715 return;
4716 case IBMVNIC_CRQ_CMD_RSP:
4717 break;
4718 default:
4719 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4720 gen_crq->first);
4721 return;
4722 }
4723
4724 switch (gen_crq->cmd) {
4725 case VERSION_EXCHANGE_RSP:
4726 rc = crq->version_exchange_rsp.rc.code;
4727 if (rc) {
4728 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4729 break;
4730 }
Thomas Falcon78468892020-05-28 11:19:17 -05004731 ibmvnic_version =
Thomas Falcon032c5e82015-12-21 11:26:06 -06004732 be16_to_cpu(crq->version_exchange_rsp.version);
Thomas Falcon78468892020-05-28 11:19:17 -05004733 dev_info(dev, "Partner protocol version is %d\n",
4734 ibmvnic_version);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004735 send_cap_queries(adapter);
4736 break;
4737 case QUERY_CAPABILITY_RSP:
4738 handle_query_cap_rsp(crq, adapter);
4739 break;
4740 case QUERY_MAP_RSP:
4741 handle_query_map_rsp(crq, adapter);
4742 break;
4743 case REQUEST_MAP_RSP:
Thomas Falconf3be0cb2017-06-21 14:53:01 -05004744 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4745 complete(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004746 break;
4747 case REQUEST_UNMAP_RSP:
4748 handle_request_unmap_rsp(crq, adapter);
4749 break;
4750 case REQUEST_CAPABILITY_RSP:
4751 handle_request_cap_rsp(crq, adapter);
4752 break;
4753 case LOGIN_RSP:
4754 netdev_dbg(netdev, "Got Login Response\n");
4755 handle_login_rsp(crq, adapter);
4756 break;
4757 case LOGICAL_LINK_STATE_RSP:
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004758 netdev_dbg(netdev,
4759 "Got Logical Link State Response, state: %d rc: %d\n",
4760 crq->logical_link_state_rsp.link_state,
4761 crq->logical_link_state_rsp.rc.code);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004762 adapter->logical_link_state =
4763 crq->logical_link_state_rsp.link_state;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004764 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4765 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004766 break;
4767 case LINK_STATE_INDICATION:
4768 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4769 adapter->phys_link_state =
4770 crq->link_state_indication.phys_link_state;
4771 adapter->logical_link_state =
4772 crq->link_state_indication.logical_link_state;
Thomas Falcon0655f992019-05-09 23:13:44 -05004773 if (adapter->phys_link_state && adapter->logical_link_state)
4774 netif_carrier_on(netdev);
4775 else
4776 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004777 break;
4778 case CHANGE_MAC_ADDR_RSP:
4779 netdev_dbg(netdev, "Got MAC address change Response\n");
Thomas Falconf8136142018-01-29 13:45:05 -06004780 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004781 break;
4782 case ERROR_INDICATION:
4783 netdev_dbg(netdev, "Got Error Indication\n");
4784 handle_error_indication(crq, adapter);
4785 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004786 case REQUEST_STATISTICS_RSP:
4787 netdev_dbg(netdev, "Got Statistics Response\n");
4788 complete(&adapter->stats_done);
4789 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004790 case QUERY_IP_OFFLOAD_RSP:
4791 netdev_dbg(netdev, "Got Query IP offload Response\n");
4792 handle_query_ip_offload_rsp(adapter);
4793 break;
4794 case MULTICAST_CTRL_RSP:
4795 netdev_dbg(netdev, "Got multicast control Response\n");
4796 break;
4797 case CONTROL_IP_OFFLOAD_RSP:
4798 netdev_dbg(netdev, "Got Control IP offload Response\n");
4799 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4800 sizeof(adapter->ip_offload_ctrl),
4801 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05004802 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004803 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004804 case COLLECT_FW_TRACE_RSP:
4805 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4806 complete(&adapter->fw_done);
4807 break;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004808 case GET_VPD_SIZE_RSP:
4809 handle_vpd_size_rsp(crq, adapter);
4810 break;
4811 case GET_VPD_RSP:
4812 handle_vpd_rsp(crq, adapter);
4813 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004814 case QUERY_PHYS_PARMS_RSP:
4815 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4816 complete(&adapter->fw_done);
4817 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004818 default:
4819 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4820 gen_crq->cmd);
4821 }
4822}
4823
4824static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4825{
4826 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06004827
Thomas Falcon6c267b32017-02-15 12:17:58 -06004828 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004829 return IRQ_HANDLED;
4830}
4831
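/* Tasklet scheduled from the CRQ interrupt: drain all valid messages from
 * the CRQ under the queue lock and hand each one to ibmvnic_handle_crq().
 */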
4832static void ibmvnic_tasklet(void *data)
4833{
4834 struct ibmvnic_adapter *adapter = data;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004835 struct ibmvnic_crq_queue *queue = &adapter->crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004836 union ibmvnic_crq *crq;
4837 unsigned long flags;
4838 bool done = false;
4839
4840 spin_lock_irqsave(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004841 while (!done) {
4842 /* Pull all the valid messages off the CRQ */
4843 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4844 ibmvnic_handle_crq(crq, adapter);
4845 crq->generic.first = 0;
4846 }
Brian Kinged7ecbf2017-04-19 13:44:53 -04004847
4848 /* remain in tasklet until all
4849 * capabilities responses are received
4850 */
4851 if (!adapter->wait_capability)
4852 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004853 }
Thomas Falcon249168a2017-02-15 12:18:00 -06004854	/* if capability CRQs were sent in this tasklet, the next run of the
	4855	 * tasklet must wait until all responses are received
4856 */
4857 if (atomic_read(&adapter->running_cap_crqs) != 0)
4858 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004859 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004860}
4861
4862static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4863{
4864 struct vio_dev *vdev = adapter->vdev;
4865 int rc;
4866
4867 do {
4868 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4869 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4870
4871 if (rc)
4872 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4873
4874 return rc;
4875}
4876
4877static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4878{
4879 struct ibmvnic_crq_queue *crq = &adapter->crq;
4880 struct device *dev = &adapter->vdev->dev;
4881 struct vio_dev *vdev = adapter->vdev;
4882 int rc;
4883
4884 /* Close the CRQ */
4885 do {
4886 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4887 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4888
4889 /* Clean out the queue */
4890 memset(crq->msgs, 0, PAGE_SIZE);
4891 crq->cur = 0;
Thomas Falcon51536982018-05-23 13:37:56 -05004892 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004893
4894 /* And re-open it again */
4895 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4896 crq->msg_token, PAGE_SIZE);
4897
4898 if (rc == H_CLOSED)
4899 /* Adapter is good, but other end is not ready */
4900 dev_warn(dev, "Partner adapter not ready\n");
4901 else if (rc != 0)
4902 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4903
4904 return rc;
4905}
4906
Nathan Fontenotf9928872017-03-30 02:48:54 -04004907static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004908{
4909 struct ibmvnic_crq_queue *crq = &adapter->crq;
4910 struct vio_dev *vdev = adapter->vdev;
4911 long rc;
4912
Nathan Fontenotf9928872017-03-30 02:48:54 -04004913 if (!crq->msgs)
4914 return;
4915
Thomas Falcon032c5e82015-12-21 11:26:06 -06004916 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4917 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004918 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004919 do {
4920 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4921 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4922
4923 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4924 DMA_BIDIRECTIONAL);
4925 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04004926 crq->msgs = NULL;
Thomas Falcon51536982018-05-23 13:37:56 -05004927 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004928}
4929
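/* Allocate and DMA-map one page for the CRQ, register it with the
 * hypervisor (falling back to a CRQ reset if the resource is still busy,
 * e.g. after a kexec), then set up the tasklet and interrupt handler.
 */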
Nathan Fontenotf9928872017-03-30 02:48:54 -04004930static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004931{
4932 struct ibmvnic_crq_queue *crq = &adapter->crq;
4933 struct device *dev = &adapter->vdev->dev;
4934 struct vio_dev *vdev = adapter->vdev;
4935 int rc, retrc = -ENOMEM;
4936
Nathan Fontenotf9928872017-03-30 02:48:54 -04004937 if (crq->msgs)
4938 return 0;
4939
Thomas Falcon032c5e82015-12-21 11:26:06 -06004940 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4941 /* Should we allocate more than one page? */
4942
4943 if (!crq->msgs)
4944 return -ENOMEM;
4945
4946 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4947 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4948 DMA_BIDIRECTIONAL);
4949 if (dma_mapping_error(dev, crq->msg_token))
4950 goto map_failed;
4951
4952 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4953 crq->msg_token, PAGE_SIZE);
4954
4955 if (rc == H_RESOURCE)
4956 /* maybe kexecing and resource is busy. try a reset */
4957 rc = ibmvnic_reset_crq(adapter);
4958 retrc = rc;
4959
4960 if (rc == H_CLOSED) {
4961 dev_warn(dev, "Partner adapter not ready\n");
4962 } else if (rc) {
4963 dev_warn(dev, "Error %d opening adapter\n", rc);
4964 goto reg_crq_failed;
4965 }
4966
4967 retrc = 0;
4968
Thomas Falcon6c267b32017-02-15 12:17:58 -06004969 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4970 (unsigned long)adapter);
4971
Thomas Falcon032c5e82015-12-21 11:26:06 -06004972 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03004973 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
4974 adapter->vdev->unit_address);
4975 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004976 if (rc) {
4977 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4978 vdev->irq, rc);
4979 goto req_irq_failed;
4980 }
4981
4982 rc = vio_enable_interrupts(vdev);
4983 if (rc) {
4984 dev_err(dev, "Error %d enabling interrupts\n", rc);
4985 goto req_irq_failed;
4986 }
4987
4988 crq->cur = 0;
4989 spin_lock_init(&crq->lock);
4990
4991 return retrc;
4992
4993req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06004994 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004995 do {
4996 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4997 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4998reg_crq_failed:
4999 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5000map_failed:
5001 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005002 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005003 return retrc;
5004}
5005
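/* Drive the CRQ initialization handshake and build the sub-CRQs. On a
 * reset the sub-CRQs are reallocated only if the negotiated queue counts
 * changed; otherwise the existing queues are simply reset.
 */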
Lijun Pan635e4422020-08-19 17:52:26 -05005006static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
John Allenf6ef6402017-03-17 17:13:42 -05005007{
5008 struct device *dev = &adapter->vdev->dev;
5009 unsigned long timeout = msecs_to_jiffies(30000);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005010 u64 old_num_rx_queues, old_num_tx_queues;
John Allenf6ef6402017-03-17 17:13:42 -05005011 int rc;
5012
John Allen017892c12017-05-26 10:30:19 -04005013 adapter->from_passive_init = false;
5014
Lijun Pan635e4422020-08-19 17:52:26 -05005015 if (reset) {
5016 old_num_rx_queues = adapter->req_rx_queues;
5017 old_num_tx_queues = adapter->req_tx_queues;
5018 reinit_completion(&adapter->init_done);
5019 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005020
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005021 adapter->init_done_rc = 0;
Lijun Panfa68bfa2020-08-19 17:52:24 -05005022 rc = ibmvnic_send_crq_init(adapter);
5023 if (rc) {
5024 dev_err(dev, "Send crq init failed with error %d\n", rc);
5025 return rc;
5026 }
5027
John Allenf6ef6402017-03-17 17:13:42 -05005028 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5029 dev_err(dev, "Initialization sequence timed out\n");
John Allen017892c12017-05-26 10:30:19 -04005030 return -1;
5031 }
5032
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005033 if (adapter->init_done_rc) {
5034 release_crq_queue(adapter);
5035 return adapter->init_done_rc;
5036 }
5037
Lijun Pan635e4422020-08-19 17:52:26 -05005038 if (reset &&
5039 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05005040 adapter->reset_reason != VNIC_RESET_MOBILITY) {
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005041 if (adapter->req_rx_queues != old_num_rx_queues ||
5042 adapter->req_tx_queues != old_num_tx_queues) {
5043 release_sub_crqs(adapter, 0);
5044 rc = init_sub_crqs(adapter);
5045 } else {
5046 rc = reset_sub_crq_queues(adapter);
5047 }
5048 } else {
Nathan Fontenot57a49432017-05-26 10:31:12 -04005049 rc = init_sub_crqs(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005050 }
5051
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005052 if (rc) {
5053 dev_err(dev, "Initialization of sub crqs failed\n");
5054 release_crq_queue(adapter);
Thomas Falcon5df969c2017-06-28 19:55:54 -05005055 return rc;
5056 }
5057
5058 rc = init_sub_crq_irqs(adapter);
5059 if (rc) {
5060 dev_err(dev, "Failed to initialize sub crq irqs\n");
5061 release_crq_queue(adapter);
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005062 }
5063
5064 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05005065}
5066
Thomas Falcon40c9db82017-06-12 12:35:04 -05005067static struct device_attribute dev_attr_failover;
5068
Thomas Falcon032c5e82015-12-21 11:26:06 -06005069static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5070{
5071 struct ibmvnic_adapter *adapter;
5072 struct net_device *netdev;
5073 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005074 int rc;
5075
5076 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5077 dev->unit_address);
5078
5079 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5080 VETH_MAC_ADDR, NULL);
5081 if (!mac_addr_p) {
5082 dev_err(&dev->dev,
5083 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5084 __FILE__, __LINE__);
5085 return 0;
5086 }
5087
5088 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
Thomas Falcond45cc3a2017-12-18 12:52:11 -06005089 IBMVNIC_MAX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005090 if (!netdev)
5091 return -ENOMEM;
5092
5093 adapter = netdev_priv(netdev);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005094 adapter->state = VNIC_PROBING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005095 dev_set_drvdata(&dev->dev, netdev);
5096 adapter->vdev = dev;
5097 adapter->netdev = netdev;
5098
5099 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5100 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5101 netdev->irq = dev->irq;
5102 netdev->netdev_ops = &ibmvnic_netdev_ops;
5103 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5104 SET_NETDEV_DEV(netdev, &dev->dev);
5105
5106 spin_lock_init(&adapter->stats_lock);
5107
Nathan Fontenoted651a12017-05-03 14:04:38 -04005108 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005109 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5110 __ibmvnic_delayed_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005111 INIT_LIST_HEAD(&adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06005112 spin_lock_init(&adapter->rwi_lock);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005113 spin_lock_init(&adapter->state_lock);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005114 mutex_init(&adapter->fw_lock);
Thomas Falconbbd669a2019-04-04 18:58:26 -05005115 init_completion(&adapter->init_done);
Thomas Falcon070eca92019-11-25 17:12:53 -06005116 init_completion(&adapter->fw_done);
5117 init_completion(&adapter->reset_done);
5118 init_completion(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005119 clear_bit(0, &adapter->resetting);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005120
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005121 do {
Nathan Fontenot30f79622018-04-06 18:37:06 -05005122 rc = init_crq_queue(adapter);
5123 if (rc) {
5124 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5125 rc);
5126 goto ibmvnic_init_fail;
5127 }
5128
Lijun Pan635e4422020-08-19 17:52:26 -05005129 rc = ibmvnic_reset_init(adapter, false);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005130 if (rc && rc != EAGAIN)
5131 goto ibmvnic_init_fail;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005132 } while (rc == EAGAIN);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005133
Thomas Falcon07184212018-05-16 15:49:05 -05005134 rc = init_stats_buffers(adapter);
5135 if (rc)
5136 goto ibmvnic_init_fail;
5137
5138 rc = init_stats_token(adapter);
5139 if (rc)
5140 goto ibmvnic_stats_fail;
5141
Thomas Falconf39f0d12017-02-14 10:22:59 -06005142 netdev->mtu = adapter->req_mtu - ETH_HLEN;
John Allenc26eba02017-10-26 16:23:25 -05005143 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5144 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005145
Thomas Falcon40c9db82017-06-12 12:35:04 -05005146 rc = device_create_file(&dev->dev, &dev_attr_failover);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005147 if (rc)
Thomas Falcon07184212018-05-16 15:49:05 -05005148 goto ibmvnic_dev_file_err;
Thomas Falcon40c9db82017-06-12 12:35:04 -05005149
Mick Tarsele876a8a2017-09-28 13:53:18 -07005150 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005151 rc = register_netdev(netdev);
5152 if (rc) {
5153 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005154 goto ibmvnic_register_fail;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005155 }
5156 dev_info(&dev->dev, "ibmvnic registered\n");
5157
Nathan Fontenot90c80142017-05-03 14:04:32 -04005158 adapter->state = VNIC_PROBED;
John Allenc26eba02017-10-26 16:23:25 -05005159
5160 adapter->wait_for_reset = false;
5161
Thomas Falcon032c5e82015-12-21 11:26:06 -06005162 return 0;
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005163
5164ibmvnic_register_fail:
5165 device_remove_file(&dev->dev, &dev_attr_failover);
5166
Thomas Falcon07184212018-05-16 15:49:05 -05005167ibmvnic_dev_file_err:
5168 release_stats_token(adapter);
5169
5170ibmvnic_stats_fail:
5171 release_stats_buffers(adapter);
5172
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005173ibmvnic_init_fail:
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005174 release_sub_crqs(adapter, 1);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005175 release_crq_queue(adapter);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005176 mutex_destroy(&adapter->fw_lock);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005177 free_netdev(netdev);
5178
5179 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005180}
5181
5182static int ibmvnic_remove(struct vio_dev *dev)
5183{
5184 struct net_device *netdev = dev_get_drvdata(&dev->dev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005185 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005186 unsigned long flags;
5187
5188 spin_lock_irqsave(&adapter->state_lock, flags);
5189 if (adapter->state == VNIC_RESETTING) {
5190 spin_unlock_irqrestore(&adapter->state_lock, flags);
5191 return -EBUSY;
5192 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06005193
Nathan Fontenot90c80142017-05-03 14:04:32 -04005194 adapter->state = VNIC_REMOVING;
Juliet Kim7d7195a2020-03-10 09:23:58 -05005195 spin_unlock_irqrestore(&adapter->state_lock, flags);
5196
Thomas Falcon6954a9e2020-06-12 13:34:41 -05005197 flush_work(&adapter->ibmvnic_reset);
5198 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5199
Juliet Kima5681e22018-11-19 15:59:22 -06005200 rtnl_lock();
5201 unregister_netdevice(netdev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005202
5203 release_resources(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005204 release_sub_crqs(adapter, 1);
Nathan Fontenot37489052017-04-19 13:45:04 -04005205 release_crq_queue(adapter);
5206
Thomas Falcon53cc7722018-02-26 18:10:56 -06005207 release_stats_token(adapter);
5208 release_stats_buffers(adapter);
5209
Nathan Fontenot90c80142017-05-03 14:04:32 -04005210 adapter->state = VNIC_REMOVED;
5211
Juliet Kima5681e22018-11-19 15:59:22 -06005212 rtnl_unlock();
Thomas Falconff25dcb2019-11-25 17:12:56 -06005213 mutex_destroy(&adapter->fw_lock);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005214 device_remove_file(&dev->dev, &dev_attr_failover);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005215 free_netdev(netdev);
5216 dev_set_drvdata(&dev->dev, NULL);
5217
5218 return 0;
5219}
5220
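/* sysfs hook for client-initiated failover: fetch the session token with
 * H_GET_SESSION_TOKEN and signal H_SESSION_ERR_DETECTED so the backing
 * device fails over. A minimal usage sketch, assuming the usual vio sysfs
 * layout (the exact path is illustrative):
 *
 *   echo 1 > /sys/devices/vio/<unit-address>/failover
 */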
Thomas Falcon40c9db82017-06-12 12:35:04 -05005221static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5222 const char *buf, size_t count)
5223{
5224 struct net_device *netdev = dev_get_drvdata(dev);
5225 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5226 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5227 __be64 session_token;
5228 long rc;
5229
5230 if (!sysfs_streq(buf, "1"))
5231 return -EINVAL;
5232
5233 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5234 H_GET_SESSION_TOKEN, 0, 0, 0);
5235 if (rc) {
5236 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5237 rc);
5238 return -EINVAL;
5239 }
5240
5241 session_token = (__be64)retbuf[0];
5242 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5243 be64_to_cpu(session_token));
5244 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5245 H_SESSION_ERR_DETECTED, session_token, 0, 0);
5246 if (rc) {
5247 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5248 rc);
5249 return -EINVAL;
5250 }
5251
5252 return count;
5253}
5254
Joe Perches6cbaefb2017-12-19 10:15:09 -08005255static DEVICE_ATTR_WO(failover);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005256
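/* Estimate the IO entitlement this adapter wants: one page for the CRQ,
 * the statistics buffer, four pages per tx/rx sub-CRQ, and the long-term
 * mapped buffers of each active rx pool.
 */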
Thomas Falcon032c5e82015-12-21 11:26:06 -06005257static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5258{
5259 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5260 struct ibmvnic_adapter *adapter;
5261 struct iommu_table *tbl;
5262 unsigned long ret = 0;
5263 int i;
5264
5265 tbl = get_iommu_table_base(&vdev->dev);
5266
	5267	/* netdev inits at probe time along with the structures we need below */
5268 if (!netdev)
5269 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5270
5271 adapter = netdev_priv(netdev);
5272
5273 ret += PAGE_SIZE; /* the crq message queue */
Thomas Falcon032c5e82015-12-21 11:26:06 -06005274 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5275
5276 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5277 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5278
Thomas Falcon507ebe62020-08-21 13:39:01 -05005279 for (i = 0; i < adapter->num_active_rx_pools; i++)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005280 ret += adapter->rx_pool[i].size *
5281 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5282
5283 return ret;
5284}
5285
5286static int ibmvnic_resume(struct device *dev)
5287{
5288 struct net_device *netdev = dev_get_drvdata(dev);
5289 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005290
John Allencb89ba22017-06-19 11:27:53 -05005291 if (adapter->state != VNIC_OPEN)
5292 return 0;
5293
John Allena2488782017-07-24 13:26:06 -05005294 tasklet_schedule(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005295
5296 return 0;
5297}
5298
Arvind Yadav8c37bc62017-08-17 18:52:54 +05305299static const struct vio_device_id ibmvnic_device_table[] = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06005300 {"network", "IBM,vnic"},
5301 {"", "" }
5302};
5303MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5304
5305static const struct dev_pm_ops ibmvnic_pm_ops = {
5306 .resume = ibmvnic_resume
5307};
5308
5309static struct vio_driver ibmvnic_driver = {
5310 .id_table = ibmvnic_device_table,
5311 .probe = ibmvnic_probe,
5312 .remove = ibmvnic_remove,
5313 .get_desired_dma = ibmvnic_get_desired_dma,
5314 .name = ibmvnic_driver_name,
5315 .pm = &ibmvnic_pm_ops,
5316};
5317
5318/* module functions */
5319static int __init ibmvnic_module_init(void)
5320{
5321 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5322 IBMVNIC_DRIVER_VERSION);
5323
5324 return vio_register_driver(&ibmvnic_driver);
5325}
5326
5327static void __exit ibmvnic_module_exit(void)
5328{
5329 vio_unregister_driver(&ibmvnic_driver);
5330}
5331
5332module_init(ibmvnic_module_init);
5333module_exit(ibmvnic_module_exit);