// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, continuous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

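/* Register a sub-CRQ with the hypervisor via the H_REG_SUB_CRQ hcall and
 * return the assigned queue number and interrupt source through @number
 * and @irq.
 */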
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

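/**
 * alloc_long_term_buff - Allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: requested buffer size in bytes
 *
 * Allocate a DMA-coherent buffer, assign it the next map id and register
 * it with the VNIC server via send_request_map(), waiting for the
 * firmware completion before the buffer is considered usable.
 */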
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

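/**
 * free_long_term_buff - Unmap and free a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer to free
 *
 * Ask the VNIC server to unmap the buffer (skipped for failover and
 * mobility resets) and release the DMA-coherent memory.
 */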
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

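/**
 * reset_long_term_buff - Re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Zero the buffer and send a fresh map request for it. If the firmware
 * rejects the mapping, free the buffer and allocate a new one instead.
 */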
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

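/**
 * replenish_rx_pool - Refill an rx pool with receive buffers
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * Allocate an skb for each free slot in the pool, point it at an offset
 * in the pool's long term buffer and post the descriptors to the server
 * in batches with send_subcrq_indirect(). On failure the batched
 * descriptors are unwound; if the queue is closed or a failover is
 * pending, the rx pools are deactivated and the carrier is turned off
 * until firmware triggers a reset.
 */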
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;
	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
		pool->next_free = (pool->next_free + 1) % pool->size;
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

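/**
 * reset_rx_pools - Prepare the rx pools for reuse after a reset
 * @adapter: private device data
 *
 * Re-map each rx pool's long term buffer, or reallocate it when the
 * required buffer size has changed, and reinitialize the pool's free
 * map and counters so the pool can be refilled from scratch.
 */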
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

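/**
 * init_rx_pools - Allocate the receive buffer pools
 * @netdev: net device whose pools are being set up
 *
 * Allocate one rx pool per active rx sub-CRQ, sized by the requested
 * number of rx add entries, and back each pool with a long term mapped
 * DMA buffer large enough to hold every entry.
 */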
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

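/**
 * init_tx_pools - Allocate the transmit buffer pools
 * @netdev: net device whose pools are being set up
 *
 * For each active tx sub-CRQ allocate a regular tx pool sized for the
 * requested number of tx entries and a companion TSO pool, each backed
 * by its own long term mapped DMA buffer.
 */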
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

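/**
 * ibmvnic_login - Log the driver in with the VNIC server
 * @netdev: net device being brought up
 *
 * Send a login request and wait for the response. Timeouts, aborted
 * logins and partial successes are retried up to ten times; on partial
 * success the sub-CRQs are released, capabilities are renegotiated and
 * the sub-CRQs and their interrupts are reallocated before retrying.
 */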
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

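/**
 * set_link_state - Set the logical link state of the adapter
 * @adapter: private device data
 * @link_state: requested IBMVNIC_LOGICAL_LNK_* state
 *
 * Send a LOGICAL_LINK_STATE CRQ and wait for the response, re-sending
 * after a one second delay when the server reports partial success.
 */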
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

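/**
 * ibmvnic_get_vpd - Retrieve Vital Product Data from the VNIC server
 * @adapter: private device data
 *
 * Query the VPD size with a GET_VPD_SIZE CRQ, size the VPD buffer to
 * match, DMA map it, and then issue a GET_VPD CRQ for the server to
 * fill it in. Each request waits on the firmware completion under
 * fw_lock.
 */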
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

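/**
 * __ibmvnic_open - Bring the interface up after login and resource setup
 * @netdev: net device being opened
 *
 * Replenish the rx pools, enable NAPI and the sub-CRQ interrupts, set
 * the logical link state to up and start the tx queues. When reopening
 * a previously closed device, the queue interrupts are re-enabled and
 * NAPI is rescheduled.
 */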
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/*
	 * If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

1345static int __ibmvnic_close(struct net_device *netdev)
1346{
1347 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1348 int rc = 0;
1349
1350 adapter->state = VNIC_CLOSING;
1351 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1352 if (rc)
1353 return rc;
Nathan Fontenot90c80142017-05-03 14:04:32 -04001354 adapter->state = VNIC_CLOSED;
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001355 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001356}
1357
Nathan Fontenoted651a12017-05-03 14:04:38 -04001358static int ibmvnic_close(struct net_device *netdev)
1359{
1360 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1361 int rc;
1362
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001363 /* If device failover is pending, just set device state and return.
1364 * Device operation will be handled by reset routine.
1365 */
1366 if (adapter->failover_pending) {
1367 adapter->state = VNIC_CLOSED;
1368 return 0;
1369 }
1370
Nathan Fontenoted651a12017-05-03 14:04:38 -04001371 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001372 ibmvnic_cleanup(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001373
1374 return rc;
1375}
1376
Thomas Falconad7775d2016-04-01 17:20:34 -05001377/**
1378 * build_hdr_data - creates L2/L3/L4 header data buffer
1379 * @hdr_field: bitfield determining needed headers
1380 * @skb: socket buffer
1381 * @hdr_len: array of header lengths
1382 * @hdr_data: buffer to write the headers to
1383 *
1384 * Reads hdr_field to determine which headers are needed by firmware.
1385 * Builds a buffer containing these headers. Saves individual header
1386 * lengths and total buffer length to be used to build descriptors.
1387 */
1388static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1389 int *hdr_len, u8 *hdr_data)
1390{
1391 int len = 0;
1392 u8 *hdr;
1393
Thomas Falconda75e3b2018-03-12 11:51:02 -05001394 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1395 hdr_len[0] = sizeof(struct vlan_ethhdr);
1396 else
1397 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001398
1399 if (skb->protocol == htons(ETH_P_IP)) {
1400 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1401 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1402 hdr_len[2] = tcp_hdrlen(skb);
1403 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1404 hdr_len[2] = sizeof(struct udphdr);
1405 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1406 hdr_len[1] = sizeof(struct ipv6hdr);
1407 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1408 hdr_len[2] = tcp_hdrlen(skb);
1409 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1410 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001411 } else if (skb->protocol == htons(ETH_P_ARP)) {
1412 hdr_len[1] = arp_hdr_len(skb->dev);
1413 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001414 }
1415
1416 memset(hdr_data, 0, 120);
1417 if ((hdr_field >> 6) & 1) {
1418 hdr = skb_mac_header(skb);
1419 memcpy(hdr_data, hdr, hdr_len[0]);
1420 len += hdr_len[0];
1421 }
1422
1423 if ((hdr_field >> 5) & 1) {
1424 hdr = skb_network_header(skb);
1425 memcpy(hdr_data + len, hdr, hdr_len[1]);
1426 len += hdr_len[1];
1427 }
1428
1429 if ((hdr_field >> 4) & 1) {
1430 hdr = skb_transport_header(skb);
1431 memcpy(hdr_data + len, hdr, hdr_len[2]);
1432 len += hdr_len[2];
1433 }
1434 return len;
1435}
1436
1437/**
1438 * create_hdr_descs - create header and header extension descriptors
1439 * @hdr_field: bitfield determining needed headers
1440 * @hdr_data: buffer containing header data
1441 * @len: length of data buffer
1442 * @hdr_len: array of individual header lengths
1443 * @scrq_arr: descriptor array
1444 *
1445 * Creates header and, if needed, header extension descriptors and
1446 * places them in a descriptor array, scrq_arr
1447 */
1448
Thomas Falcon2de09682017-10-16 10:02:11 -05001449static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1450 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001451{
1452 union sub_crq hdr_desc;
1453 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001454 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001455 u8 *data, *cur;
1456 int tmp;
1457
1458 while (tmp_len > 0) {
1459 cur = hdr_data + len - tmp_len;
1460
1461 memset(&hdr_desc, 0, sizeof(hdr_desc));
1462 if (cur != hdr_data) {
1463 data = hdr_desc.hdr_ext.data;
1464 tmp = tmp_len > 29 ? 29 : tmp_len;
1465 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1466 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1467 hdr_desc.hdr_ext.len = tmp;
1468 } else {
1469 data = hdr_desc.hdr.data;
1470 tmp = tmp_len > 24 ? 24 : tmp_len;
1471 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1472 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1473 hdr_desc.hdr.len = tmp;
1474 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1475 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1476 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1477 hdr_desc.hdr.flag = hdr_field << 1;
1478 }
1479 memcpy(data, cur, tmp);
1480 tmp_len -= tmp;
1481 *scrq_arr = hdr_desc;
1482 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001483 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001484 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001485
1486 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001487}
1488
1489/**
1490 * build_hdr_descs_arr - build a header descriptor array
1491 * @skb: socket buffer
1492 * @indir_arr: descriptor array to be filled (first entry is the TX descriptor)
1493 * @num_entries: number of descriptors to be sent
1494 * @hdr_field: bit field determining which headers will be sent
1495 *
1496 * This function will build a TX descriptor array with applicable
1497 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1498 */
1499
Thomas Falconc62aa372020-11-18 19:12:20 -06001500static void build_hdr_descs_arr(struct sk_buff *skb,
1501 union sub_crq *indir_arr,
Thomas Falconad7775d2016-04-01 17:20:34 -05001502 int *num_entries, u8 hdr_field)
1503{
1504 int hdr_len[3] = {0, 0, 0};
Thomas Falconc62aa372020-11-18 19:12:20 -06001505 u8 hdr_data[140] = {0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001506 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001507
Thomas Falconc62aa372020-11-18 19:12:20 -06001508 tot_len = build_hdr_data(hdr_field, skb, hdr_len,
1509 hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001510 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconc62aa372020-11-18 19:12:20 -06001511 indir_arr + 1);
Thomas Falconad7775d2016-04-01 17:20:34 -05001512}
1513
Thomas Falcon1f247a62018-03-12 11:51:04 -05001514static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1515 struct net_device *netdev)
1516{
1517 /* For some backing devices, mishandling of small packets
1518 * can result in a loss of connection or TX stall. Device
1519 * architects recommend that no packet should be smaller
1520 * than the minimum MTU value provided to the driver, so
1521 * pad any packets to that length
1522 */
1523 if (skb->len < netdev->min_mtu)
1524 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001525
1526 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001527}
1528
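/* Unwind the descriptors staged in a TX sub-CRQ's indirect buffer after
 * a failed flush: walk the staged entries in reverse, return each buffer
 * to its pool's free map, free the skb and back out the TX statistics,
 * then wake the subqueue if enough space has been reclaimed.
 */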
Thomas Falcon0d973382020-11-18 19:12:19 -06001529static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
1530 struct ibmvnic_sub_crq_queue *tx_scrq)
1531{
1532 struct ibmvnic_ind_xmit_queue *ind_bufp;
1533 struct ibmvnic_tx_buff *tx_buff;
1534 struct ibmvnic_tx_pool *tx_pool;
1535 union sub_crq tx_scrq_entry;
1536 int queue_num;
1537 int entries;
1538 int index;
1539 int i;
1540
1541 ind_bufp = &tx_scrq->ind_buf;
1542 entries = (u64)ind_bufp->index;
1543 queue_num = tx_scrq->pool_index;
1544
1545 for (i = entries - 1; i >= 0; --i) {
1546 tx_scrq_entry = ind_bufp->indir_arr[i];
1547 if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
1548 continue;
1549 index = be32_to_cpu(tx_scrq_entry.v1.correlator);
1550 if (index & IBMVNIC_TSO_POOL_MASK) {
1551 tx_pool = &adapter->tso_pool[queue_num];
1552 index &= ~IBMVNIC_TSO_POOL_MASK;
1553 } else {
1554 tx_pool = &adapter->tx_pool[queue_num];
1555 }
1556 tx_pool->free_map[tx_pool->consumer_index] = index;
1557 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1558 tx_pool->num_buffers - 1 :
1559 tx_pool->consumer_index - 1;
1560 tx_buff = &tx_pool->tx_buff[index];
1561 adapter->netdev->stats.tx_packets--;
1562 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
1563 adapter->tx_stats_buffers[queue_num].packets--;
1564 adapter->tx_stats_buffers[queue_num].bytes -=
1565 tx_buff->skb->len;
1566 dev_kfree_skb_any(tx_buff->skb);
1567 tx_buff->skb = NULL;
1568 adapter->netdev->stats.tx_dropped++;
1569 }
1570 ind_bufp->index = 0;
1571 if (atomic_sub_return(entries, &tx_scrq->used) <=
1572 (adapter->req_tx_entries_per_subcrq / 2) &&
1573 __netif_subqueue_stopped(adapter->netdev, queue_num)) {
1574 netif_wake_subqueue(adapter->netdev, queue_num);
1575 netdev_dbg(adapter->netdev, "Started queue %d\n",
1576 queue_num);
1577 }
1578}
1579
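/* Submit the descriptors accumulated in a TX sub-CRQ's indirect buffer
 * to firmware with a single indirect send.  On failure the staged
 * entries are unwound by ibmvnic_tx_scrq_clean_buffer(); on success the
 * indirect buffer is reset for reuse.
 */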
1580static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
1581 struct ibmvnic_sub_crq_queue *tx_scrq)
1582{
1583 struct ibmvnic_ind_xmit_queue *ind_bufp;
1584 u64 dma_addr;
1585 u64 entries;
1586 u64 handle;
1587 int rc;
1588
1589 ind_bufp = &tx_scrq->ind_buf;
1590 dma_addr = (u64)ind_bufp->indir_dma;
1591 entries = (u64)ind_bufp->index;
1592 handle = tx_scrq->handle;
1593
1594 if (!entries)
1595 return 0;
1596 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
1597 if (rc)
1598 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
1599 else
1600 ind_bufp->index = 0;
1601 return 0;
1602}
1603
YueHaibing94b2bb22018-09-18 14:35:47 +08001604static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001605{
1606 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1607 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001608 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001609 struct device *dev = &adapter->vdev->dev;
Thomas Falcon0d973382020-11-18 19:12:19 -06001610 struct ibmvnic_ind_xmit_queue *ind_bufp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001611 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001612 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001613 struct ibmvnic_tx_pool *tx_pool;
1614 unsigned int tx_send_failed = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001615 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001616 unsigned int tx_map_failed = 0;
Thomas Falconc62aa372020-11-18 19:12:20 -06001617 union sub_crq indir_arr[16];
Thomas Falcon032c5e82015-12-21 11:26:06 -06001618 unsigned int tx_dropped = 0;
1619 unsigned int tx_packets = 0;
1620 unsigned int tx_bytes = 0;
1621 dma_addr_t data_dma_addr;
1622 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001623 unsigned long lpar_rc;
1624 union sub_crq tx_crq;
1625 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001626 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001627 unsigned char *dst;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001628 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001629 u8 proto = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06001630
1631 tx_scrq = adapter->tx_scrq[queue_num];
1632 txq = netdev_get_tx_queue(netdev, queue_num);
1633 ind_bufp = &tx_scrq->ind_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001634
Juliet Kim7ed5b312019-09-20 16:11:23 -04001635 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001636 if (!netif_subqueue_stopped(netdev, skb))
1637 netif_stop_subqueue(netdev, queue_num);
1638 dev_kfree_skb_any(skb);
1639
Thomas Falcon032c5e82015-12-21 11:26:06 -06001640 tx_send_failed++;
1641 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001642 ret = NETDEV_TX_OK;
Thomas Falcon0d973382020-11-18 19:12:19 -06001643 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001644 goto out;
1645 }
1646
Thomas Falcon7083a452018-03-12 21:05:26 -05001647 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001648 tx_dropped++;
1649 tx_send_failed++;
1650 ret = NETDEV_TX_OK;
Thomas Falcon0d973382020-11-18 19:12:19 -06001651 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon1f247a62018-03-12 11:51:04 -05001652 goto out;
1653 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001654 if (skb_is_gso(skb))
1655 tx_pool = &adapter->tso_pool[queue_num];
1656 else
1657 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001658
Thomas Falcon032c5e82015-12-21 11:26:06 -06001659 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001660
Thomas Falcon86b61a52018-03-16 20:00:29 -05001661 if (index == IBMVNIC_INVALID_MAP) {
1662 dev_kfree_skb_any(skb);
1663 tx_send_failed++;
1664 tx_dropped++;
1665 ret = NETDEV_TX_OK;
Thomas Falcon0d973382020-11-18 19:12:19 -06001666 ibmvnic_tx_scrq_flush(adapter, tx_scrq);
Thomas Falcon86b61a52018-03-16 20:00:29 -05001667 goto out;
1668 }
1669
1670 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1671
Thomas Falcon06b3e352018-03-16 20:00:28 -05001672 offset = index * tx_pool->buf_size;
1673 dst = tx_pool->long_term_buff.buff + offset;
1674 memset(dst, 0, tx_pool->buf_size);
1675 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001676
Thomas Falcon15482052017-10-17 12:36:54 -05001677 if (skb_shinfo(skb)->nr_frags) {
1678 int cur, i;
1679
1680 /* Copy the head */
1681 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1682 cur = skb_headlen(skb);
1683
1684 /* Copy the frags */
1685 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1686 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1687
1688 memcpy(dst + cur,
1689 page_address(skb_frag_page(frag)) +
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07001690 skb_frag_off(frag), skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001691 cur += skb_frag_size(frag);
1692 }
1693 } else {
1694 skb_copy_from_linear_data(skb, dst, skb->len);
1695 }
1696
Thomas Falcon032c5e82015-12-21 11:26:06 -06001697 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001698 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001699
1700 tx_buff = &tx_pool->tx_buff[index];
1701 tx_buff->skb = skb;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001702 tx_buff->index = index;
1703 tx_buff->pool_index = queue_num;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001704
1705 memset(&tx_crq, 0, sizeof(tx_crq));
1706 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1707 tx_crq.v1.type = IBMVNIC_TX_DESC;
1708 tx_crq.v1.n_crq_elem = 1;
1709 tx_crq.v1.n_sge = 1;
1710 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001711
Thomas Falconfdb06102017-10-17 12:36:55 -05001712 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001713 tx_crq.v1.correlator =
1714 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001715 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001716 tx_crq.v1.correlator = cpu_to_be32(index);
1717 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001718 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1719 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1720
Michał Mirosławe84b4792018-11-07 17:50:52 +01001721 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001722 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1723 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1724 }
1725
1726 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06001727 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1728 proto = ip_hdr(skb)->protocol;
1729 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1730 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1731 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001732 }
1733
Thomas Falcona0dca102018-01-18 19:29:48 -06001734 if (proto == IPPROTO_TCP)
1735 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1736 else if (proto == IPPROTO_UDP)
1737 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1738
Thomas Falconad7775d2016-04-01 17:20:34 -05001739 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001740 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05001741 hdrs += 2;
1742 }
Thomas Falconfdb06102017-10-17 12:36:55 -05001743 if (skb_is_gso(skb)) {
1744 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1745 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1746 hdrs += 2;
1747 }
Thomas Falcon0d973382020-11-18 19:12:19 -06001748
1749 if ((*hdrs >> 7) & 1)
Thomas Falconc62aa372020-11-18 19:12:20 -06001750 build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
Thomas Falcon0d973382020-11-18 19:12:19 -06001751
1752 tx_crq.v1.n_crq_elem = num_entries;
1753 tx_buff->num_entries = num_entries;
1754 /* flush buffer if current entry can not fit */
1755 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
1756 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1757 if (lpar_rc != H_SUCCESS)
1758 goto tx_flush_err;
Thomas Falconad7775d2016-04-01 17:20:34 -05001759 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001760
Thomas Falconc62aa372020-11-18 19:12:20 -06001761 indir_arr[0] = tx_crq;
1762 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
Thomas Falcon0d973382020-11-18 19:12:19 -06001763 num_entries * sizeof(struct ibmvnic_generic_scrq));
1764 ind_bufp->index += num_entries;
1765 if (__netdev_tx_sent_queue(txq, skb->len,
1766 netdev_xmit_more() &&
1767 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
1768 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
1769 if (lpar_rc != H_SUCCESS)
1770 goto tx_err;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001771 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001772
Thomas Falconffc385b2018-02-18 10:08:41 -06001773 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04001774 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06001775 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001776 netif_stop_subqueue(netdev, queue_num);
1777 }
1778
Thomas Falcon032c5e82015-12-21 11:26:06 -06001779 tx_packets++;
1780 tx_bytes += skb->len;
1781 txq->trans_start = jiffies;
1782 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001783 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001784
Thomas Falcon0d973382020-11-18 19:12:19 -06001785tx_flush_err:
1786 dev_kfree_skb_any(skb);
1787 tx_buff->skb = NULL;
1788 tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
1789 tx_pool->num_buffers - 1 :
1790 tx_pool->consumer_index - 1;
1791 tx_dropped++;
1792tx_err:
1793 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1794 dev_err_ratelimited(dev, "tx: send failed\n");
1795
1796 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1797 /* Disable TX and report carrier off if queue is closed
1798 * or pending failover.
1799 * Firmware guarantees that a signal will be sent to the
1800 * driver, triggering a reset or some other action.
1801 */
1802 netif_tx_stop_all_queues(netdev);
1803 netif_carrier_off(netdev);
1804 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001805out:
1806 netdev->stats.tx_dropped += tx_dropped;
1807 netdev->stats.tx_bytes += tx_bytes;
1808 netdev->stats.tx_packets += tx_packets;
1809 adapter->tx_send_failed += tx_send_failed;
1810 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05001811 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1812 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1813 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001814
1815 return ret;
1816}
1817
1818static void ibmvnic_set_multi(struct net_device *netdev)
1819{
1820 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1821 struct netdev_hw_addr *ha;
1822 union ibmvnic_crq crq;
1823
1824 memset(&crq, 0, sizeof(crq));
1825 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1826 crq.request_capability.cmd = REQUEST_CAPABILITY;
1827
1828 if (netdev->flags & IFF_PROMISC) {
1829 if (!adapter->promisc_supported)
1830 return;
1831 } else {
1832 if (netdev->flags & IFF_ALLMULTI) {
1833 /* Accept all multicast */
1834 memset(&crq, 0, sizeof(crq));
1835 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1836 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1837 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1838 ibmvnic_send_crq(adapter, &crq);
1839 } else if (netdev_mc_empty(netdev)) {
1840 /* Reject all multicast */
1841 memset(&crq, 0, sizeof(crq));
1842 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1843 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1844 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1845 ibmvnic_send_crq(adapter, &crq);
1846 } else {
1847 /* Accept one or more multicast(s) */
1848 netdev_for_each_mc_addr(ha, netdev) {
1849 memset(&crq, 0, sizeof(crq));
1850 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1851 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1852 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1853 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1854 ha->addr);
1855 ibmvnic_send_crq(adapter, &crq);
1856 }
1857 }
1858 }
1859}
1860
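/* Program a new MAC address into the adapter by sending a
 * CHANGE_MAC_ADDR CRQ and waiting for the firmware response;
 * netdev->dev_addr itself is updated by the response handler.
 */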
Thomas Falcon62740e92019-05-09 23:13:43 -05001861static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001862{
1863 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001864 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001865 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001866
Thomas Falcon62740e92019-05-09 23:13:43 -05001867 if (!is_valid_ether_addr(dev_addr)) {
1868 rc = -EADDRNOTAVAIL;
1869 goto err;
1870 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001871
1872 memset(&crq, 0, sizeof(crq));
1873 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1874 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05001875 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06001876
Thomas Falconff25dcb2019-11-25 17:12:56 -06001877 mutex_lock(&adapter->fw_lock);
1878 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06001879 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001880
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001881 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05001882 if (rc) {
1883 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001884 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001885 goto err;
1886 }
1887
Thomas Falcon476d96c2019-11-25 17:12:55 -06001888 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001889 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06001890 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05001891 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001892 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001893 goto err;
1894 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06001895 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001896 return 0;
1897err:
1898 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1899 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001900}
1901
John Allenc26eba02017-10-26 16:23:25 -05001902static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1903{
1904 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1905 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06001906 int rc;
John Allenc26eba02017-10-26 16:23:25 -05001907
Thomas Falcon62740e92019-05-09 23:13:43 -05001908 rc = 0;
Lijun Pan8fc36722020-10-27 17:04:56 -05001909 if (!is_valid_ether_addr(addr->sa_data))
1910 return -EADDRNOTAVAIL;
1911
1912 if (adapter->state != VNIC_PROBED) {
1913 ether_addr_copy(adapter->mac_addr, addr->sa_data);
Thomas Falcon62740e92019-05-09 23:13:43 -05001914 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
Lijun Pan8fc36722020-10-27 17:04:56 -05001915 }
John Allenc26eba02017-10-26 16:23:25 -05001916
Thomas Falconf8136142018-01-29 13:45:05 -06001917 return rc;
John Allenc26eba02017-10-26 16:23:25 -05001918}
1919
Nathan Fontenoted651a12017-05-03 14:04:38 -04001920/*
Juliet Kimb27507b2019-09-20 16:11:22 -04001921 * do_change_param_reset returns zero if we are able to keep processing reset
1922 * events, or non-zero if we hit a fatal error and must halt.
1923 */
1924static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1925 struct ibmvnic_rwi *rwi,
1926 u32 reset_state)
1927{
1928 struct net_device *netdev = adapter->netdev;
1929 int i, rc;
1930
1931 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1932 rwi->reset_reason);
1933
1934 netif_carrier_off(netdev);
1935 adapter->reset_reason = rwi->reset_reason;
1936
1937 ibmvnic_cleanup(netdev);
1938
1939 if (reset_state == VNIC_OPEN) {
1940 rc = __ibmvnic_close(netdev);
1941 if (rc)
1942 return rc;
1943 }
1944
1945 release_resources(adapter);
1946 release_sub_crqs(adapter, 1);
1947 release_crq_queue(adapter);
1948
1949 adapter->state = VNIC_PROBED;
1950
1951 rc = init_crq_queue(adapter);
1952
1953 if (rc) {
1954 netdev_err(adapter->netdev,
1955 "Couldn't initialize crq. rc=%d\n", rc);
1956 return rc;
1957 }
1958
Lijun Pan635e4422020-08-19 17:52:26 -05001959 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04001960 if (rc)
1961 return IBMVNIC_INIT_FAILED;
1962
1963 /* If the adapter was in PROBE state prior to the reset,
1964 * exit here.
1965 */
1966 if (reset_state == VNIC_PROBED)
1967 return 0;
1968
1969 rc = ibmvnic_login(netdev);
1970 if (rc) {
1971 adapter->state = reset_state;
1972 return rc;
1973 }
1974
1975 rc = init_resources(adapter);
1976 if (rc)
1977 return rc;
1978
1979 ibmvnic_disable_irqs(adapter);
1980
1981 adapter->state = VNIC_CLOSED;
1982
1983 if (reset_state == VNIC_CLOSED)
1984 return 0;
1985
1986 rc = __ibmvnic_open(netdev);
1987 if (rc)
1988 return IBMVNIC_OPEN_FAILED;
1989
1990 /* refresh device's multicast list */
1991 ibmvnic_set_multi(netdev);
1992
1993 /* kick napi */
1994 for (i = 0; i < adapter->req_rx_queues; i++)
1995 napi_schedule(&adapter->napi[i]);
1996
1997 return 0;
1998}
1999
2000/*
Nathan Fontenoted651a12017-05-03 14:04:38 -04002001 * do_reset returns zero if we are able to keep processing reset events, or
2002 * non-zero if we hit a fatal error and must halt.
2003 */
2004static int do_reset(struct ibmvnic_adapter *adapter,
2005 struct ibmvnic_rwi *rwi, u32 reset_state)
2006{
John Allen896d8692018-01-18 16:26:31 -06002007 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002008 u64 old_num_rx_slots, old_num_tx_slots;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002009 struct net_device *netdev = adapter->netdev;
2010 int i, rc;
2011
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002012 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
2013 rwi->reset_reason);
2014
Juliet Kimb27507b2019-09-20 16:11:22 -04002015 rtnl_lock();
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002016 /*
2017 * Now that we have the rtnl lock, clear any pending failover.
2018 * This will ensure ibmvnic_open() has either completed or will
2019 * block until failover is complete.
2020 */
2021 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2022 adapter->failover_pending = false;
Juliet Kimb27507b2019-09-20 16:11:22 -04002023
Nathan Fontenoted651a12017-05-03 14:04:38 -04002024 netif_carrier_off(netdev);
2025 adapter->reset_reason = rwi->reset_reason;
2026
John Allen896d8692018-01-18 16:26:31 -06002027 old_num_rx_queues = adapter->req_rx_queues;
2028 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06002029 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2030 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06002031
Nathan Fontenot30f79622018-04-06 18:37:06 -05002032 ibmvnic_cleanup(netdev);
2033
Thomas Falcon1f946082019-06-07 16:03:53 -05002034 if (reset_state == VNIC_OPEN &&
2035 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05002036 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002037 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002038
Juliet Kimb27507b2019-09-20 16:11:22 -04002039 /* Release the RTNL lock before link state change and
2040 * re-acquire after the link state change to allow
2041 * linkwatch_event to grab the RTNL lock and run during
2042 * a reset.
2043 */
2044 rtnl_unlock();
2045 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2046 rtnl_lock();
2047 if (rc)
2048 goto out;
2049
2050 if (adapter->state != VNIC_CLOSING) {
2051 rc = -1;
2052 goto out;
2053 }
2054
2055 adapter->state = VNIC_CLOSED;
John Allenc26eba02017-10-26 16:23:25 -05002056 }
2057
John Allen8cb31cf2017-05-26 10:30:37 -04002058 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2059 /* remove the closed state so when we call open it appears
2060 * we are coming from the probed state.
2061 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002062 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04002063
Juliet Kimb27507b2019-09-20 16:11:22 -04002064 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002065 rc = ibmvnic_reenable_crq_queue(adapter);
2066 release_sub_crqs(adapter, 1);
2067 } else {
2068 rc = ibmvnic_reset_crq(adapter);
Dany Madden8b40eb732020-06-18 15:24:13 -04002069 if (rc == H_CLOSED || rc == H_SUCCESS) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05002070 rc = vio_enable_interrupts(adapter->vdev);
Dany Madden8b40eb732020-06-18 15:24:13 -04002071 if (rc)
2072 netdev_err(adapter->netdev,
2073 "Reset failed to enable interrupts. rc=%d\n",
2074 rc);
2075 }
Nathan Fontenot30f79622018-04-06 18:37:06 -05002076 }
2077
2078 if (rc) {
2079 netdev_err(adapter->netdev,
Dany Madden8b40eb732020-06-18 15:24:13 -04002080 "Reset couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002081 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05002082 }
2083
Lijun Pan635e4422020-08-19 17:52:26 -05002084 rc = ibmvnic_reset_init(adapter, true);
Juliet Kimb27507b2019-09-20 16:11:22 -04002085 if (rc) {
2086 rc = IBMVNIC_INIT_FAILED;
2087 goto out;
2088 }
John Allen8cb31cf2017-05-26 10:30:37 -04002089
2090 /* If the adapter was in PROBE state prior to the reset,
2091 * exit here.
2092 */
Juliet Kimb27507b2019-09-20 16:11:22 -04002093 if (reset_state == VNIC_PROBED) {
2094 rc = 0;
2095 goto out;
2096 }
John Allen8cb31cf2017-05-26 10:30:37 -04002097
2098 rc = ibmvnic_login(netdev);
2099 if (rc) {
John Allen3578a7e2018-07-16 10:29:30 -05002100 adapter->state = reset_state;
Juliet Kimb27507b2019-09-20 16:11:22 -04002101 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04002102 }
2103
Juliet Kimb27507b2019-09-20 16:11:22 -04002104 if (adapter->req_rx_queues != old_num_rx_queues ||
2105 adapter->req_tx_queues != old_num_tx_queues ||
2106 adapter->req_rx_add_entries_per_subcrq !=
2107 old_num_rx_slots ||
2108 adapter->req_tx_entries_per_subcrq !=
Mingming Cao9f134572020-08-25 13:26:41 -04002109 old_num_tx_slots ||
2110 !adapter->rx_pool ||
2111 !adapter->tso_pool ||
2112 !adapter->tx_pool) {
John Allen896d8692018-01-18 16:26:31 -06002113 release_rx_pools(adapter);
2114 release_tx_pools(adapter);
Juliet Kima5681e22018-11-19 15:59:22 -06002115 release_napi(adapter);
2116 release_vpd_data(adapter);
2117
2118 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002119 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002120 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002121
John Allenc26eba02017-10-26 16:23:25 -05002122 } else {
2123 rc = reset_tx_pools(adapter);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002124 if (rc) {
Mingming Cao9f134572020-08-25 13:26:41 -04002125 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
2126 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002127 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002128 }
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002129
John Allenc26eba02017-10-26 16:23:25 -05002130 rc = reset_rx_pools(adapter);
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002131 if (rc) {
Mingming Cao9f134572020-08-25 13:26:41 -04002132 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
2133 rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04002134 goto out;
Jakub Kicinski8ae4dff2020-09-04 21:07:49 -07002135 }
John Allenc26eba02017-10-26 16:23:25 -05002136 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002137 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002138 }
John Allene676d812018-03-14 10:41:29 -05002139 adapter->state = VNIC_CLOSED;
2140
Juliet Kimb27507b2019-09-20 16:11:22 -04002141 if (reset_state == VNIC_CLOSED) {
2142 rc = 0;
2143 goto out;
2144 }
John Allene676d812018-03-14 10:41:29 -05002145
Nathan Fontenoted651a12017-05-03 14:04:38 -04002146 rc = __ibmvnic_open(netdev);
2147 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002148 rc = IBMVNIC_OPEN_FAILED;
2149 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002150 }
2151
Thomas Falconbe32a242019-06-07 16:03:54 -05002152 /* refresh device's multicast list */
2153 ibmvnic_set_multi(netdev);
2154
Nathan Fontenoted651a12017-05-03 14:04:38 -04002155 /* kick napi */
2156 for (i = 0; i < adapter->req_rx_queues; i++)
2157 napi_schedule(&adapter->napi[i]);
2158
Juliet Kimb27507b2019-09-20 16:11:22 -04002159 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
Thomas Falcon986103e2018-11-30 10:59:08 -06002160 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002161
Juliet Kimb27507b2019-09-20 16:11:22 -04002162 rc = 0;
2163
2164out:
2165 rtnl_unlock();
2166
2167 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002168}
2169
Thomas Falcon2770a792018-05-23 13:38:02 -05002170static int do_hard_reset(struct ibmvnic_adapter *adapter,
2171 struct ibmvnic_rwi *rwi, u32 reset_state)
2172{
2173 struct net_device *netdev = adapter->netdev;
2174 int rc;
2175
2176 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2177 rwi->reset_reason);
2178
2179 netif_carrier_off(netdev);
2180 adapter->reset_reason = rwi->reset_reason;
2181
2182 ibmvnic_cleanup(netdev);
2183 release_resources(adapter);
2184 release_sub_crqs(adapter, 0);
2185 release_crq_queue(adapter);
2186
2187 /* remove the closed state so when we call open it appears
2188 * we are coming from the probed state.
2189 */
2190 adapter->state = VNIC_PROBED;
2191
Thomas Falconbbd669a2019-04-04 18:58:26 -05002192 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002193 rc = init_crq_queue(adapter);
2194 if (rc) {
2195 netdev_err(adapter->netdev,
2196 "Couldn't initialize crq. rc=%d\n", rc);
2197 return rc;
2198 }
2199
Lijun Pan635e4422020-08-19 17:52:26 -05002200 rc = ibmvnic_reset_init(adapter, false);
Thomas Falcon2770a792018-05-23 13:38:02 -05002201 if (rc)
2202 return rc;
2203
2204 /* If the adapter was in PROBE state prior to the reset,
2205 * exit here.
2206 */
2207 if (reset_state == VNIC_PROBED)
2208 return 0;
2209
2210 rc = ibmvnic_login(netdev);
2211 if (rc) {
2212 adapter->state = VNIC_PROBED;
2213 return 0;
2214 }
Juliet Kima5681e22018-11-19 15:59:22 -06002215
2216 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002217 if (rc)
2218 return rc;
2219
2220 ibmvnic_disable_irqs(adapter);
2221 adapter->state = VNIC_CLOSED;
2222
2223 if (reset_state == VNIC_CLOSED)
2224 return 0;
2225
2226 rc = __ibmvnic_open(netdev);
Juliet Kimb27507b2019-09-20 16:11:22 -04002227 if (rc)
2228 return IBMVNIC_OPEN_FAILED;
Thomas Falcon2770a792018-05-23 13:38:02 -05002229
Thomas Falcon2770a792018-05-23 13:38:02 -05002230 return 0;
2231}
2232
Nathan Fontenoted651a12017-05-03 14:04:38 -04002233static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2234{
2235 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002236 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002237
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002238 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002239
2240 if (!list_empty(&adapter->rwi_list)) {
2241 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2242 list);
2243 list_del(&rwi->list);
2244 } else {
2245 rwi = NULL;
2246 }
2247
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002248 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002249 return rwi;
2250}
2251
2252static void free_all_rwi(struct ibmvnic_adapter *adapter)
2253{
2254 struct ibmvnic_rwi *rwi;
2255
2256 rwi = get_next_rwi(adapter);
2257 while (rwi) {
2258 kfree(rwi);
2259 rwi = get_next_rwi(adapter);
2260 }
2261}
2262
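/* Reset worker: drain the adapter's reset work item (rwi) queue and run
 * the appropriate reset path for each entry, rescheduling itself as
 * delayed work if another reset is already in progress.
 */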
2263static void __ibmvnic_reset(struct work_struct *work)
2264{
2265 struct ibmvnic_rwi *rwi;
2266 struct ibmvnic_adapter *adapter;
Juliet Kim7d7195a2020-03-10 09:23:58 -05002267 bool saved_state = false;
2268 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002269 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05002270 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002271
2272 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002273
Juliet Kim7ed5b312019-09-20 16:11:23 -04002274 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2275 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2276 IBMVNIC_RESET_DELAY);
2277 return;
2278 }
2279
Nathan Fontenoted651a12017-05-03 14:04:38 -04002280 rwi = get_next_rwi(adapter);
2281 while (rwi) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002282 spin_lock_irqsave(&adapter->state_lock, flags);
2283
Thomas Falcon36f10312019-08-27 11:10:04 -05002284 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002285 adapter->state == VNIC_REMOVED) {
Juliet Kim7d7195a2020-03-10 09:23:58 -05002286 spin_unlock_irqrestore(&adapter->state_lock, flags);
Juliet Kim1c2977c2019-09-05 17:30:01 -04002287 kfree(rwi);
2288 rc = EBUSY;
2289 break;
2290 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002291
Juliet Kim7d7195a2020-03-10 09:23:58 -05002292 if (!saved_state) {
2293 reset_state = adapter->state;
2294 adapter->state = VNIC_RESETTING;
2295 saved_state = true;
2296 }
2297 spin_unlock_irqrestore(&adapter->state_lock, flags);
2298
Juliet Kimb27507b2019-09-20 16:11:22 -04002299 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2300 /* CHANGE_PARAM requestor holds rtnl_lock */
2301 rc = do_change_param_reset(adapter, rwi, reset_state);
2302 } else if (adapter->force_reset_recovery) {
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002303 /*
2304 * Since we are doing a hard reset now, clear the
2305 * failover_pending flag so we don't ignore any
2306 * future MOBILITY or other resets.
2307 */
2308 adapter->failover_pending = false;
2309
Juliet Kimb27507b2019-09-20 16:11:22 -04002310 /* Transport event occurred during previous reset */
2311 if (adapter->wait_for_reset) {
2312 /* Previous was CHANGE_PARAM; caller locked */
2313 adapter->force_reset_recovery = false;
2314 rc = do_hard_reset(adapter, rwi, reset_state);
2315 } else {
2316 rtnl_lock();
2317 adapter->force_reset_recovery = false;
2318 rc = do_hard_reset(adapter, rwi, reset_state);
2319 rtnl_unlock();
2320 }
Juliet Kimf9c6cea2020-04-30 13:22:11 -05002321 } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2322 adapter->from_passive_init)) {
Thomas Falcon2770a792018-05-23 13:38:02 -05002323 rc = do_reset(adapter, rwi, reset_state);
2324 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002325 kfree(rwi);
Juliet Kimb27507b2019-09-20 16:11:22 -04002326 if (rc == IBMVNIC_OPEN_FAILED) {
2327 if (list_empty(&adapter->rwi_list))
2328 adapter->state = VNIC_CLOSED;
2329 else
2330 adapter->state = reset_state;
2331 rc = 0;
2332 } else if (rc && rc != IBMVNIC_INIT_FAILED &&
Thomas Falcon2770a792018-05-23 13:38:02 -05002333 !adapter->force_reset_recovery)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002334 break;
2335
2336 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002337
2338 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2339 rwi->reset_reason == VNIC_RESET_MOBILITY))
2340 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002341 }
2342
John Allenc26eba02017-10-26 16:23:25 -05002343 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002344 adapter->reset_done_rc = rc;
2345 complete(&adapter->reset_done);
2346 }
2347
Nathan Fontenoted651a12017-05-03 14:04:38 -04002348 if (rc) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002349 netdev_dbg(adapter->netdev, "Reset failed\n");
Nathan Fontenoted651a12017-05-03 14:04:38 -04002350 free_all_rwi(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002351 }
Juliet Kim1c2977c2019-09-05 17:30:01 -04002352
Juliet Kim7ed5b312019-09-20 16:11:23 -04002353 clear_bit_unlock(0, &adapter->resetting);
2354}
2355
2356static void __ibmvnic_delayed_reset(struct work_struct *work)
2357{
2358 struct ibmvnic_adapter *adapter;
2359
2360 adapter = container_of(work, struct ibmvnic_adapter,
2361 ibmvnic_delayed_reset.work);
2362 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002363}
2364
Thomas Falconaf894d22018-04-06 18:37:04 -05002365static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2366 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002367{
Thomas Falcon2770a792018-05-23 13:38:02 -05002368 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002369 struct ibmvnic_rwi *rwi, *tmp;
2370 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002371 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002372 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002373
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002374 /*
2375 * If failover is pending don't schedule any other reset.
2376 * Instead let the failover complete. If there is already a
2377 * failover reset scheduled, we will detect and drop the
2378 * duplicate reset when walking the ->rwi_list below.
2379 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04002380 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002381 adapter->state == VNIC_REMOVED ||
Sukadev Bhattiprolu1d850492020-10-30 10:07:11 -07002382 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002383 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002384 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002385 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002386 }
2387
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002388 if (adapter->state == VNIC_PROBING) {
2389 netdev_warn(netdev, "Adapter reset during probe\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002390 ret = adapter->init_done_rc = EAGAIN;
2391 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002392 }
2393
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002394 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002395
2396 list_for_each(entry, &adapter->rwi_list) {
2397 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2398 if (tmp->reset_reason == reason) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002399 netdev_dbg(netdev, "Skipping matching reset\n");
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002400 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Thomas Falconaf894d22018-04-06 18:37:04 -05002401 ret = EBUSY;
2402 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002403 }
2404 }
2405
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002406 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002407 if (!rwi) {
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002408 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002409 ibmvnic_close(netdev);
Thomas Falconaf894d22018-04-06 18:37:04 -05002410 ret = ENOMEM;
2411 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002412 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002413 /* if we just received a transport event,
2414 * flush reset queue and process this reset
2415 */
2416 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2417 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2418 list_del(entry);
2419 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002420 rwi->reset_reason = reason;
2421 list_add_tail(&rwi->list, &adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002422 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002423 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002424 schedule_work(&adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002425
2426 return 0;
2427err:
Thomas Falconaf894d22018-04-06 18:37:04 -05002428 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002429}
2430
Michael S. Tsirkin0290bd22019-12-10 09:23:51 -05002431static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002432{
2433 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002434
Nathan Fontenoted651a12017-05-03 14:04:38 -04002435 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002436}
2437
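/* Return a receive buffer to its pool: clear the skb reference, put the
 * buffer index back on the pool's free map and decrement the count of
 * available buffers.
 */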
2438static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2439 struct ibmvnic_rx_buff *rx_buff)
2440{
2441 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2442
2443 rx_buff->skb = NULL;
2444
2445 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2446 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2447
2448 atomic_dec(&pool->available);
2449}
2450
2451static int ibmvnic_poll(struct napi_struct *napi, int budget)
2452{
2453 struct net_device *netdev = napi->dev;
2454 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2455 int scrq_num = (int)(napi - adapter->napi);
2456 int frames_processed = 0;
Nathan Fontenot152ce472017-05-26 10:30:54 -04002457
Thomas Falcon032c5e82015-12-21 11:26:06 -06002458restart_poll:
2459 while (frames_processed < budget) {
2460 struct sk_buff *skb;
2461 struct ibmvnic_rx_buff *rx_buff;
2462 union sub_crq *next;
2463 u32 length;
2464 u16 offset;
2465 u8 flags = 0;
2466
Juliet Kim7ed5b312019-09-20 16:11:23 -04002467 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002468 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Thomas Falcon21ecba62017-06-14 23:50:09 -05002469 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2470 napi_complete_done(napi, frames_processed);
2471 return frames_processed;
2472 }
2473
Thomas Falcon032c5e82015-12-21 11:26:06 -06002474 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2475 break;
2476 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2477 rx_buff =
2478 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2479 rx_comp.correlator);
2480 /* do error checking */
2481 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002482 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2483 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002484 /* free the entry */
2485 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002486 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002487 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002488 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002489 } else if (!rx_buff->skb) {
2490 /* free the entry */
2491 next->rx_comp.first = 0;
2492 remove_buff_from_pool(adapter, rx_buff);
2493 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002494 }
2495
2496 length = be32_to_cpu(next->rx_comp.len);
2497 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2498 flags = next->rx_comp.flags;
2499 skb = rx_buff->skb;
2500 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2501 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002502
2503 /* VLAN Header has been stripped by the system firmware and
2504 * needs to be inserted by the driver
2505 */
2506 if (adapter->rx_vlan_header_insertion &&
2507 (flags & IBMVNIC_VLAN_STRIPPED))
2508 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2509 ntohs(next->rx_comp.vlan_tci));
2510
Thomas Falcon032c5e82015-12-21 11:26:06 -06002511 /* free the entry */
2512 next->rx_comp.first = 0;
2513 remove_buff_from_pool(adapter, rx_buff);
2514
2515 skb_put(skb, length);
2516 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002517 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002518
2519 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2520 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2521 skb->ip_summed = CHECKSUM_UNNECESSARY;
2522 }
2523
2524 length = skb->len;
2525 napi_gro_receive(napi, skb); /* send it up */
2526 netdev->stats.rx_packets++;
2527 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002528 adapter->rx_stats_buffers[scrq_num].packets++;
2529 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002530 frames_processed++;
2531 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002532
2533 if (adapter->state != VNIC_CLOSING)
2534 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002535
2536 if (frames_processed < budget) {
2537 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08002538 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002539 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2540 napi_reschedule(napi)) {
2541 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2542 goto restart_poll;
2543 }
2544 }
2545 return frames_processed;
2546}
2547
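/* Request a CHANGE_PARAM reset to apply newly requested ring, queue or
 * MTU settings and block until it completes.  If the reset fails, the
 * previous values are restored and a second reset is issued to fall
 * back to them.
 */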
John Allenc26eba02017-10-26 16:23:25 -05002548static int wait_for_reset(struct ibmvnic_adapter *adapter)
2549{
Thomas Falconaf894d22018-04-06 18:37:04 -05002550 int rc, ret;
2551
John Allenc26eba02017-10-26 16:23:25 -05002552 adapter->fallback.mtu = adapter->req_mtu;
2553 adapter->fallback.rx_queues = adapter->req_rx_queues;
2554 adapter->fallback.tx_queues = adapter->req_tx_queues;
2555 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2556 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2557
Thomas Falcon070eca92019-11-25 17:12:53 -06002558 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002559 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002560 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002561
2562 if (rc) {
2563 ret = rc;
2564 goto out;
2565 }
2566 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2567 if (rc) {
2568 ret = -ENODEV;
2569 goto out;
2570 }
John Allenc26eba02017-10-26 16:23:25 -05002571
Thomas Falconaf894d22018-04-06 18:37:04 -05002572 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002573 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002574 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002575 adapter->desired.mtu = adapter->fallback.mtu;
2576 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2577 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2578 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2579 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2580
Thomas Falcon070eca92019-11-25 17:12:53 -06002581 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002582 adapter->wait_for_reset = true;
2583 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002584 if (rc) {
2585 ret = rc;
2586 goto out;
2587 }
2588 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2589 60000);
2590 if (rc) {
2591 ret = -ENODEV;
2592 goto out;
2593 }
John Allenc26eba02017-10-26 16:23:25 -05002594 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002595out:
John Allenc26eba02017-10-26 16:23:25 -05002596 adapter->wait_for_reset = false;
2597
Thomas Falconaf894d22018-04-06 18:37:04 -05002598 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002599}
2600
John Allen3a807b72017-06-06 16:55:52 -05002601static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2602{
John Allenc26eba02017-10-26 16:23:25 -05002603 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2604
2605 adapter->desired.mtu = new_mtu + ETH_HLEN;
2606
2607 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05002608}
2609
Thomas Falconf10b09e2018-03-12 11:51:05 -05002610static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2611 struct net_device *dev,
2612 netdev_features_t features)
2613{
2614 /* Some backing hardware adapters cannot
2615 * handle packets with an MSS less than 224
2616 * or with only one segment.
2617 */
2618 if (skb_is_gso(skb)) {
2619 if (skb_shinfo(skb)->gso_size < 224 ||
2620 skb_shinfo(skb)->gso_segs == 1)
2621 features &= ~NETIF_F_GSO_MASK;
2622 }
2623
2624 return features;
2625}
2626
Thomas Falcon032c5e82015-12-21 11:26:06 -06002627static const struct net_device_ops ibmvnic_netdev_ops = {
2628 .ndo_open = ibmvnic_open,
2629 .ndo_stop = ibmvnic_close,
2630 .ndo_start_xmit = ibmvnic_xmit,
2631 .ndo_set_rx_mode = ibmvnic_set_multi,
2632 .ndo_set_mac_address = ibmvnic_set_mac,
2633 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002634 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05002635 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05002636 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002637};
2638
2639/* ethtool functions */
2640
Philippe Reynes8a433792017-01-07 22:37:29 +01002641static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2642 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002643{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002644 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2645 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01002646
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002647 rc = send_query_phys_parms(adapter);
2648 if (rc) {
2649 adapter->speed = SPEED_UNKNOWN;
2650 adapter->duplex = DUPLEX_UNKNOWN;
2651 }
2652 cmd->base.speed = adapter->speed;
2653 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01002654 cmd->base.port = PORT_FIBRE;
2655 cmd->base.phy_address = 0;
2656 cmd->base.autoneg = AUTONEG_ENABLE;
2657
Thomas Falcon032c5e82015-12-21 11:26:06 -06002658 return 0;
2659}
2660
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002661static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002662 struct ethtool_drvinfo *info)
2663{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002664 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2665
Thomas Falcon032c5e82015-12-21 11:26:06 -06002666 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2667 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002668 strlcpy(info->fw_version, adapter->fw_version,
2669 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002670}
2671
2672static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2673{
2674 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2675
2676 return adapter->msg_enable;
2677}
2678
2679static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2680{
2681 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2682
2683 adapter->msg_enable = data;
2684}
2685
2686static u32 ibmvnic_get_link(struct net_device *netdev)
2687{
2688 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2689
2690 /* Don't need to send a query because we request a logical link up at
2691 * init and then we wait for link state indications
2692 */
2693 return adapter->logical_link_state;
2694}
2695
2696static void ibmvnic_get_ringparam(struct net_device *netdev,
2697 struct ethtool_ringparam *ring)
2698{
John Allenbc131b32017-08-02 16:46:30 -05002699 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2700
Thomas Falcon723ad912018-09-28 18:38:26 -05002701 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2702 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2703 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2704 } else {
2705 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2706 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2707 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002708 ring->rx_mini_max_pending = 0;
2709 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05002710 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2711 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002712 ring->rx_mini_pending = 0;
2713 ring->rx_jumbo_pending = 0;
2714}
2715
John Allenc26eba02017-10-26 16:23:25 -05002716static int ibmvnic_set_ringparam(struct net_device *netdev,
2717 struct ethtool_ringparam *ring)
2718{
2719 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002720 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002721
Thomas Falcon723ad912018-09-28 18:38:26 -05002722 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002723 adapter->desired.rx_entries = ring->rx_pending;
2724 adapter->desired.tx_entries = ring->tx_pending;
2725
Thomas Falcon723ad912018-09-28 18:38:26 -05002726 ret = wait_for_reset(adapter);
2727
2728 if (!ret &&
2729 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2730 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2731 netdev_info(netdev,
2732 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2733 ring->rx_pending, ring->tx_pending,
2734 adapter->req_rx_add_entries_per_subcrq,
2735 adapter->req_tx_entries_per_subcrq);
2736 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002737}
2738
John Allenc2dbeb62017-08-02 16:47:17 -05002739static void ibmvnic_get_channels(struct net_device *netdev,
2740 struct ethtool_channels *channels)
2741{
2742 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2743
Thomas Falcon723ad912018-09-28 18:38:26 -05002744 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2745 channels->max_rx = adapter->max_rx_queues;
2746 channels->max_tx = adapter->max_tx_queues;
2747 } else {
2748 channels->max_rx = IBMVNIC_MAX_QUEUES;
2749 channels->max_tx = IBMVNIC_MAX_QUEUES;
2750 }
2751
John Allenc2dbeb62017-08-02 16:47:17 -05002752 channels->max_other = 0;
2753 channels->max_combined = 0;
2754 channels->rx_count = adapter->req_rx_queues;
2755 channels->tx_count = adapter->req_tx_queues;
2756 channels->other_count = 0;
2757 channels->combined_count = 0;
2758}
2759
John Allenc26eba02017-10-26 16:23:25 -05002760static int ibmvnic_set_channels(struct net_device *netdev,
2761 struct ethtool_channels *channels)
2762{
2763 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002764 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002765
Thomas Falcon723ad912018-09-28 18:38:26 -05002766 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002767 adapter->desired.rx_queues = channels->rx_count;
2768 adapter->desired.tx_queues = channels->tx_count;
2769
Thomas Falcon723ad912018-09-28 18:38:26 -05002770 ret = wait_for_reset(adapter);
2771
2772 if (!ret &&
2773 (adapter->req_rx_queues != channels->rx_count ||
2774 adapter->req_tx_queues != channels->tx_count))
2775 netdev_info(netdev,
2776 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2777 channels->rx_count, channels->tx_count,
2778 adapter->req_rx_queues, adapter->req_tx_queues);
2779 return ret;
2780
John Allenc26eba02017-10-26 16:23:25 -05002781}
2782
Thomas Falcon032c5e82015-12-21 11:26:06 -06002783static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2784{
John Allen3d52b592017-08-02 16:44:14 -05002785 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002786 int i;
2787
Thomas Falcon723ad912018-09-28 18:38:26 -05002788 switch (stringset) {
2789 case ETH_SS_STATS:
2790 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2791 i++, data += ETH_GSTRING_LEN)
2792 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2793
2794 for (i = 0; i < adapter->req_tx_queues; i++) {
2795 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2796 data += ETH_GSTRING_LEN;
2797
2798 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2799 data += ETH_GSTRING_LEN;
2800
2801 snprintf(data, ETH_GSTRING_LEN,
2802 "tx%d_dropped_packets", i);
2803 data += ETH_GSTRING_LEN;
2804 }
2805
2806 for (i = 0; i < adapter->req_rx_queues; i++) {
2807 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2808 data += ETH_GSTRING_LEN;
2809
2810 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2811 data += ETH_GSTRING_LEN;
2812
2813 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2814 data += ETH_GSTRING_LEN;
2815 }
2816 break;
2817
2818 case ETH_SS_PRIV_FLAGS:
2819 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2820 strcpy(data + i * ETH_GSTRING_LEN,
2821 ibmvnic_priv_flags[i]);
2822 break;
2823 default:
Thomas Falcon032c5e82015-12-21 11:26:06 -06002824 return;
John Allen3d52b592017-08-02 16:44:14 -05002825 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002826}
2827
2828static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2829{
John Allen3d52b592017-08-02 16:44:14 -05002830 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2831
Thomas Falcon032c5e82015-12-21 11:26:06 -06002832 switch (sset) {
2833 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05002834 return ARRAY_SIZE(ibmvnic_stats) +
2835 adapter->req_tx_queues * NUM_TX_STATS +
2836 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon723ad912018-09-28 18:38:26 -05002837 case ETH_SS_PRIV_FLAGS:
2838 return ARRAY_SIZE(ibmvnic_priv_flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002839 default:
2840 return -EOPNOTSUPP;
2841 }
2842}
2843
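/* Request fresh statistics from the VNIC server via a REQUEST_STATISTICS
 * CRQ, wait for the response, then copy the device-wide and per-queue
 * counters into the ethtool data buffer.
 */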
2844static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2845 struct ethtool_stats *stats, u64 *data)
2846{
2847 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2848 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05002849 int i, j;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002850 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002851
2852 memset(&crq, 0, sizeof(crq));
2853 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2854 crq.request_statistics.cmd = REQUEST_STATISTICS;
2855 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2856 crq.request_statistics.len =
2857 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002858
2859 /* Wait for data to be written */
Thomas Falcon070eca92019-11-25 17:12:53 -06002860 reinit_completion(&adapter->stats_done);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05002861 rc = ibmvnic_send_crq(adapter, &crq);
2862 if (rc)
2863 return;
Thomas Falcon476d96c2019-11-25 17:12:55 -06002864 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2865 if (rc)
2866 return;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002867
2868 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
John Allen52da5c12017-08-02 16:45:28 -05002869 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2870 ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05002871
2872 for (j = 0; j < adapter->req_tx_queues; j++) {
2873 data[i] = adapter->tx_stats_buffers[j].packets;
2874 i++;
2875 data[i] = adapter->tx_stats_buffers[j].bytes;
2876 i++;
2877 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2878 i++;
2879 }
2880
2881 for (j = 0; j < adapter->req_rx_queues; j++) {
2882 data[i] = adapter->rx_stats_buffers[j].packets;
2883 i++;
2884 data[i] = adapter->rx_stats_buffers[j].bytes;
2885 i++;
2886 data[i] = adapter->rx_stats_buffers[j].interrupts;
2887 i++;
2888 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002889}
2890
Thomas Falcon723ad912018-09-28 18:38:26 -05002891static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2892{
2893 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2894
2895 return adapter->priv_flags;
2896}
2897
2898static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2899{
2900 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2901 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2902
2903 if (which_maxes)
2904 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2905 else
2906 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2907
2908 return 0;
2909}
Thomas Falcon032c5e82015-12-21 11:26:06 -06002910static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002911 .get_drvinfo = ibmvnic_get_drvinfo,
2912 .get_msglevel = ibmvnic_get_msglevel,
2913 .set_msglevel = ibmvnic_set_msglevel,
2914 .get_link = ibmvnic_get_link,
2915 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05002916 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05002917 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05002918 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002919 .get_strings = ibmvnic_get_strings,
2920 .get_sset_count = ibmvnic_get_sset_count,
2921 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01002922 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon723ad912018-09-28 18:38:26 -05002923 .get_priv_flags = ibmvnic_get_priv_flags,
2924 .set_priv_flags = ibmvnic_set_priv_flags,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002925};
2926
2927/* Routines for managing CRQs/sCRQs */
2928
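/* Reset a single sub-CRQ: free its IRQ mapping, clear the message queue
 * and bookkeeping state, then re-register the queue with the hypervisor
 * via H_REG_SUB_CRQ.
 */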
Nathan Fontenot57a49432017-05-26 10:31:12 -04002929static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2930 struct ibmvnic_sub_crq_queue *scrq)
2931{
2932 int rc;
2933
2934 if (scrq->irq) {
2935 free_irq(scrq->irq, scrq);
2936 irq_dispose_mapping(scrq->irq);
2937 scrq->irq = 0;
2938 }
2939
Thomas Falconc8b2ad02017-06-14 23:50:07 -05002940 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
Thomas Falcon41f71462018-04-06 18:37:03 -05002941 atomic_set(&scrq->used, 0);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002942 scrq->cur = 0;
Thomas Falconf019fb62020-11-18 19:12:17 -06002943 scrq->ind_buf.index = 0;
Nathan Fontenot57a49432017-05-26 10:31:12 -04002944
2945 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2946 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2947 return rc;
2948}
2949
2950static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2951{
2952 int i, rc;
2953
2954 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002955 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002956 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2957 if (rc)
2958 return rc;
2959 }
2960
2961 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002962 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002963 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2964 if (rc)
2965 return rc;
2966 }
2967
Nathan Fontenot57a49432017-05-26 10:31:12 -04002968 return rc;
2969}
2970
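/* Tear down a sub-CRQ: optionally free it on the hypervisor side with
 * H_FREE_SUB_CRQ, then release the indirect descriptor buffer, unmap and
 * free the message pages, and free the queue structure itself.
 */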
Thomas Falcon032c5e82015-12-21 11:26:06 -06002971static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002972 struct ibmvnic_sub_crq_queue *scrq,
2973 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002974{
2975 struct device *dev = &adapter->vdev->dev;
2976 long rc;
2977
2978 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2979
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002980 if (do_h_free) {
2981 /* Close the sub-crqs */
2982 do {
2983 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2984 adapter->vdev->unit_address,
2985 scrq->crq_num);
2986 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002987
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002988 if (rc) {
2989 netdev_err(adapter->netdev,
2990 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2991 scrq->crq_num, rc);
2992 }
Thomas Falconffa73852017-04-19 13:44:29 -04002993 }
2994
Thomas Falconf019fb62020-11-18 19:12:17 -06002995 dma_free_coherent(dev,
2996 IBMVNIC_IND_ARR_SZ,
2997 scrq->ind_buf.indir_arr,
2998 scrq->ind_buf.indir_dma);
2999
Thomas Falcon032c5e82015-12-21 11:26:06 -06003000 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3001 DMA_BIDIRECTIONAL);
3002 free_pages((unsigned long)scrq->msgs, 2);
3003 kfree(scrq);
3004}
3005
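/* Allocate and register one sub-CRQ: get four zeroed pages for the
 * message queue, DMA map them, register the queue with the hypervisor,
 * and allocate the indirect descriptor buffer.
 */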
3006static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3007 *adapter)
3008{
3009 struct device *dev = &adapter->vdev->dev;
3010 struct ibmvnic_sub_crq_queue *scrq;
3011 int rc;
3012
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003013 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003014 if (!scrq)
3015 return NULL;
3016
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04003017 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003018 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003019 if (!scrq->msgs) {
3020 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3021 goto zero_page_failed;
3022 }
3023
3024 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3025 DMA_BIDIRECTIONAL);
3026 if (dma_mapping_error(dev, scrq->msg_token)) {
3027 dev_warn(dev, "Couldn't map crq queue messages page\n");
3028 goto map_failed;
3029 }
3030
3031 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3032 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3033
3034 if (rc == H_RESOURCE)
3035 rc = ibmvnic_reset_crq(adapter);
3036
3037 if (rc == H_CLOSED) {
3038 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3039 } else if (rc) {
3040 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3041 goto reg_failed;
3042 }
3043
Thomas Falcon032c5e82015-12-21 11:26:06 -06003044 scrq->adapter = adapter;
3045 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falconf019fb62020-11-18 19:12:17 -06003046 scrq->ind_buf.index = 0;
3047
3048 scrq->ind_buf.indir_arr =
3049 dma_alloc_coherent(dev,
3050 IBMVNIC_IND_ARR_SZ,
3051 &scrq->ind_buf.indir_dma,
3052 GFP_KERNEL);
3053
3054 if (!scrq->ind_buf.indir_arr)
3055 goto indir_failed;
3056
Thomas Falcon032c5e82015-12-21 11:26:06 -06003057 spin_lock_init(&scrq->lock);
3058
3059 netdev_dbg(adapter->netdev,
3060 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3061 scrq->crq_num, scrq->hw_irq, scrq->irq);
3062
3063 return scrq;
3064
Thomas Falconf019fb62020-11-18 19:12:17 -06003065indir_failed:
3066 do {
3067 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3068 adapter->vdev->unit_address,
3069 scrq->crq_num);
3070	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003071reg_failed:
3072 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3073 DMA_BIDIRECTIONAL);
3074map_failed:
3075 free_pages((unsigned long)scrq->msgs, 2);
3076zero_page_failed:
3077 kfree(scrq);
3078
3079 return NULL;
3080}
3081
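/* Release all tx and rx sub-CRQs, freeing any registered IRQs first.
 * do_h_free controls whether the queues are also freed on the
 * hypervisor side.
 */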
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003082static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003083{
3084 int i;
3085
3086 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003087 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003088 if (!adapter->tx_scrq[i])
3089 continue;
3090
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003091 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3092 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003093 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003094 free_irq(adapter->tx_scrq[i]->irq,
3095 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003096 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003097 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003098 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003099
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003100 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3101 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003102 }
3103
Nathan Fontenot9501df32017-03-15 23:38:07 -04003104 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003105 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003106 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003107 }
3108
3109 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003110 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04003111 if (!adapter->rx_scrq[i])
3112 continue;
3113
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003114 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3115 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003116 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003117 free_irq(adapter->rx_scrq[i]->irq,
3118 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05003119 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003120 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003121 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04003122
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003123 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3124 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04003125 }
3126
Nathan Fontenot9501df32017-03-15 23:38:07 -04003127 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003128 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003129 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003130 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003131}
3132
3133static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3134 struct ibmvnic_sub_crq_queue *scrq)
3135{
3136 struct device *dev = &adapter->vdev->dev;
3137 unsigned long rc;
3138
3139 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3140 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3141 if (rc)
3142 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3143 scrq->hw_irq, rc);
3144 return rc;
3145}
3146
3147static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3148 struct ibmvnic_sub_crq_queue *scrq)
3149{
3150 struct device *dev = &adapter->vdev->dev;
3151 unsigned long rc;
3152
3153 if (scrq->hw_irq > 0x100000000ULL) {
3154 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3155 return 1;
3156 }
3157
Juliet Kim7ed5b312019-09-20 16:11:23 -04003158 if (test_bit(0, &adapter->resetting) &&
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003159 adapter->reset_reason == VNIC_RESET_MOBILITY) {
Juliet Kim284f87d2019-11-20 10:50:03 -05003160 u64 val = (0xff000000) | scrq->hw_irq;
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003161
Juliet Kim284f87d2019-11-20 10:50:03 -05003162 rc = plpar_hcall_norets(H_EOI, val);
Juliet Kim2df5c602019-11-20 10:50:04 -05003163		/* H_EOI fails with rc = H_FUNCTION when running
3164		 * in XIVE mode; this is expected and not an error.
3165 */
3166 if (rc && (rc != H_FUNCTION))
Juliet Kim284f87d2019-11-20 10:50:03 -05003167 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3168 val, rc);
Nathan Fontenot73f9d362018-05-22 11:21:10 -05003169 }
Thomas Falconf23e0642018-04-15 18:53:36 -05003170
Thomas Falcon032c5e82015-12-21 11:26:06 -06003171 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3172 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3173 if (rc)
3174 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3175 scrq->hw_irq, rc);
3176 return rc;
3177}
3178
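/* Process pending TX completions on a sub-CRQ: return completed buffers
 * to their tx pool, report completed packets and bytes to the stack, and
 * wake the subqueue if it was stopped and has drained far enough.
 */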
3179static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3180 struct ibmvnic_sub_crq_queue *scrq)
3181{
3182 struct device *dev = &adapter->vdev->dev;
Thomas Falcon06b3e352018-03-16 20:00:28 -05003183 struct ibmvnic_tx_pool *tx_pool;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003184 struct ibmvnic_tx_buff *txbuff;
Thomas Falcon0d973382020-11-18 19:12:19 -06003185 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003186 union sub_crq *next;
3187 int index;
Thomas Falconc62aa372020-11-18 19:12:20 -06003188 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003189
3190restart_loop:
3191 while (pending_scrq(adapter, scrq)) {
3192 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06003193 int num_entries = 0;
Thomas Falcon0d973382020-11-18 19:12:19 -06003194 int total_bytes = 0;
3195 int num_packets = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003196
3197 next = ibmvnic_next_scrq(adapter, scrq);
3198 for (i = 0; i < next->tx_comp.num_comps; i++) {
3199 if (next->tx_comp.rcs[i]) {
3200 dev_err(dev, "tx error %x\n",
3201 next->tx_comp.rcs[i]);
3202 continue;
3203 }
3204 index = be32_to_cpu(next->tx_comp.correlators[i]);
Thomas Falcon06b3e352018-03-16 20:00:28 -05003205 if (index & IBMVNIC_TSO_POOL_MASK) {
3206 tx_pool = &adapter->tso_pool[pool];
3207 index &= ~IBMVNIC_TSO_POOL_MASK;
3208 } else {
3209 tx_pool = &adapter->tx_pool[pool];
3210 }
3211
3212 txbuff = &tx_pool->tx_buff[index];
Thomas Falcon0d973382020-11-18 19:12:19 -06003213 num_packets++;
Thomas Falconffc385b2018-02-18 10:08:41 -06003214 num_entries += txbuff->num_entries;
Thomas Falcon0d973382020-11-18 19:12:19 -06003215 if (txbuff->skb) {
3216 total_bytes += txbuff->skb->len;
3217 dev_consume_skb_irq(txbuff->skb);
3218 txbuff->skb = NULL;
3219 } else {
3220 netdev_warn(adapter->netdev,
3221 "TX completion received with NULL socket buffer\n");
3222 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05003223 tx_pool->free_map[tx_pool->producer_index] = index;
3224 tx_pool->producer_index =
3225 (tx_pool->producer_index + 1) %
3226 tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003227 }
3228		/* remove tx_comp scrq */
3229 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003230
Thomas Falcon0d973382020-11-18 19:12:19 -06003231 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3232 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3233
Thomas Falconffc385b2018-02-18 10:08:41 -06003234 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003235 (adapter->req_tx_entries_per_subcrq / 2) &&
3236 __netif_subqueue_stopped(adapter->netdev,
3237 scrq->pool_index)) {
3238 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06003239 netdev_dbg(adapter->netdev, "Started queue %d\n",
3240 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04003241 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003242 }
3243
3244 enable_scrq_irq(adapter, scrq);
3245
3246 if (pending_scrq(adapter, scrq)) {
3247 disable_scrq_irq(adapter, scrq);
3248 goto restart_loop;
3249 }
3250
3251 return 0;
3252}
3253
3254static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3255{
3256 struct ibmvnic_sub_crq_queue *scrq = instance;
3257 struct ibmvnic_adapter *adapter = scrq->adapter;
3258
3259 disable_scrq_irq(adapter, scrq);
3260 ibmvnic_complete_tx(adapter, scrq);
3261
3262 return IRQ_HANDLED;
3263}
3264
3265static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3266{
3267 struct ibmvnic_sub_crq_queue *scrq = instance;
3268 struct ibmvnic_adapter *adapter = scrq->adapter;
3269
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06003270 /* When booting a kdump kernel we can hit pending interrupts
3271 * prior to completing driver initialization.
3272 */
3273 if (unlikely(adapter->state != VNIC_OPEN))
3274 return IRQ_NONE;
3275
John Allen3d52b592017-08-02 16:44:14 -05003276 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3277
Thomas Falcon032c5e82015-12-21 11:26:06 -06003278 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3279 disable_scrq_irq(adapter, scrq);
3280 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3281 }
3282
3283 return IRQ_HANDLED;
3284}
3285
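/* Create IRQ mappings for every tx and rx sub-CRQ and request their
 * interrupt handlers; on failure, unwind any IRQs already registered and
 * release the sub-CRQs.
 */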
Thomas Falconea22d512016-07-06 15:35:17 -05003286static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3287{
3288 struct device *dev = &adapter->vdev->dev;
3289 struct ibmvnic_sub_crq_queue *scrq;
3290 int i = 0, j = 0;
3291 int rc = 0;
3292
3293 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003294 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3295 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003296 scrq = adapter->tx_scrq[i];
3297 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3298
Michael Ellerman99c17902016-09-10 19:59:05 +10003299 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003300 rc = -EINVAL;
3301 dev_err(dev, "Error mapping irq\n");
3302 goto req_tx_irq_failed;
3303 }
3304
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003305 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3306 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003307 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003308 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003309
3310 if (rc) {
3311 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3312 scrq->irq, rc);
3313 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06003314 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05003315 }
3316 }
3317
3318 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05003319 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3320 i);
Thomas Falconea22d512016-07-06 15:35:17 -05003321 scrq = adapter->rx_scrq[i];
3322 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10003323 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05003324 rc = -EINVAL;
3325 dev_err(dev, "Error mapping irq\n");
3326 goto req_rx_irq_failed;
3327 }
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003328 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3329 adapter->vdev->unit_address, i);
Thomas Falconea22d512016-07-06 15:35:17 -05003330 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03003331 0, scrq->name, scrq);
Thomas Falconea22d512016-07-06 15:35:17 -05003332 if (rc) {
3333 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3334 scrq->irq, rc);
3335 irq_dispose_mapping(scrq->irq);
3336 goto req_rx_irq_failed;
3337 }
3338 }
3339 return rc;
3340
3341req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003342 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003343 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3344 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003345 }
Thomas Falconea22d512016-07-06 15:35:17 -05003346 i = adapter->req_tx_queues;
3347req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003348 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05003349 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
Thomas Falcon27a21452020-07-29 16:36:32 -05003350 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05003351 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003352 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05003353 return rc;
3354}
3355
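/* Allocate the full set of tx and rx sub-CRQs. If fewer queues could be
 * registered than requested, scale the requested counts back (but not
 * below the minimums) before splitting the queues into the tx and rx
 * arrays.
 */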
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003356static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003357{
3358 struct device *dev = &adapter->vdev->dev;
3359 struct ibmvnic_sub_crq_queue **allqueues;
3360 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003361 int total_queues;
3362 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05003363 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003364
Thomas Falcon032c5e82015-12-21 11:26:06 -06003365 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3366
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003367 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003368 if (!allqueues)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003369 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003370
3371 for (i = 0; i < total_queues; i++) {
3372 allqueues[i] = init_sub_crq_queue(adapter);
3373 if (!allqueues[i]) {
3374 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3375 break;
3376 }
3377 registered_queues++;
3378 }
3379
3380 /* Make sure we were able to register the minimum number of queues */
3381 if (registered_queues <
3382 adapter->min_tx_queues + adapter->min_rx_queues) {
3383 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3384 goto tx_failed;
3385 }
3386
3387	/* Reduce the requested queue counts to account for those that failed to allocate */
3388 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3389 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3390 switch (i % 3) {
3391 case 0:
3392 if (adapter->req_rx_queues > adapter->min_rx_queues)
3393 adapter->req_rx_queues--;
3394 else
3395 more++;
3396 break;
3397 case 1:
3398 if (adapter->req_tx_queues > adapter->min_tx_queues)
3399 adapter->req_tx_queues--;
3400 else
3401 more++;
3402 break;
3403 }
3404 }
3405
3406 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003407 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003408 if (!adapter->tx_scrq)
3409 goto tx_failed;
3410
3411 for (i = 0; i < adapter->req_tx_queues; i++) {
3412 adapter->tx_scrq[i] = allqueues[i];
3413 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003414 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003415 }
3416
3417 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04003418 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003419 if (!adapter->rx_scrq)
3420 goto rx_failed;
3421
3422 for (i = 0; i < adapter->req_rx_queues; i++) {
3423 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3424 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06003425 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003426 }
3427
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003428 kfree(allqueues);
3429 return 0;
3430
3431rx_failed:
3432 kfree(adapter->tx_scrq);
3433 adapter->tx_scrq = NULL;
3434tx_failed:
3435 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06003436 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003437 kfree(allqueues);
3438 return -1;
3439}
3440
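/* Compute the requested capabilities (queue counts, ring sizes, MTU),
 * honoring any user-desired values and the long-term-buffer size limit,
 * then send a REQUEST_CAPABILITY CRQ for each capability.
 */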
Lijun Pan09081b92020-09-27 20:13:27 -05003441static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003442{
3443 struct device *dev = &adapter->vdev->dev;
3444 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05003445 int max_entries;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003446
3447 if (!retry) {
3448		/* Sub-CRQ entries are 32 bytes long */
3449 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3450
3451 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3452 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3453 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3454 return;
3455 }
3456
John Allenc26eba02017-10-26 16:23:25 -05003457 if (adapter->desired.mtu)
3458 adapter->req_mtu = adapter->desired.mtu;
3459 else
3460 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003461
John Allenc26eba02017-10-26 16:23:25 -05003462 if (!adapter->desired.tx_entries)
3463 adapter->desired.tx_entries =
3464 adapter->max_tx_entries_per_subcrq;
3465 if (!adapter->desired.rx_entries)
3466 adapter->desired.rx_entries =
3467 adapter->max_rx_add_entries_per_subcrq;
3468
3469 max_entries = IBMVNIC_MAX_LTB_SIZE /
3470 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3471
3472 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3473 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3474 adapter->desired.tx_entries = max_entries;
3475 }
3476
3477 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3478 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3479 adapter->desired.rx_entries = max_entries;
3480 }
3481
3482 if (adapter->desired.tx_entries)
3483 adapter->req_tx_entries_per_subcrq =
3484 adapter->desired.tx_entries;
3485 else
3486 adapter->req_tx_entries_per_subcrq =
3487 adapter->max_tx_entries_per_subcrq;
3488
3489 if (adapter->desired.rx_entries)
3490 adapter->req_rx_add_entries_per_subcrq =
3491 adapter->desired.rx_entries;
3492 else
3493 adapter->req_rx_add_entries_per_subcrq =
3494 adapter->max_rx_add_entries_per_subcrq;
3495
3496 if (adapter->desired.tx_queues)
3497 adapter->req_tx_queues =
3498 adapter->desired.tx_queues;
3499 else
3500 adapter->req_tx_queues =
3501 adapter->opt_tx_comp_sub_queues;
3502
3503 if (adapter->desired.rx_queues)
3504 adapter->req_rx_queues =
3505 adapter->desired.rx_queues;
3506 else
3507 adapter->req_rx_queues =
3508 adapter->opt_rx_comp_queues;
3509
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003510 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003511 }
3512
Thomas Falcon032c5e82015-12-21 11:26:06 -06003513 memset(&crq, 0, sizeof(crq));
3514 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3515 crq.request_capability.cmd = REQUEST_CAPABILITY;
3516
3517 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003518 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003519 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003520 ibmvnic_send_crq(adapter, &crq);
3521
3522 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003523 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003524 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003525 ibmvnic_send_crq(adapter, &crq);
3526
3527 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06003528 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06003529 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003530 ibmvnic_send_crq(adapter, &crq);
3531
3532 crq.request_capability.capability =
3533 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3534 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003535 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003536 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003537 ibmvnic_send_crq(adapter, &crq);
3538
3539 crq.request_capability.capability =
3540 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3541 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06003542 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06003543 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003544 ibmvnic_send_crq(adapter, &crq);
3545
3546 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06003547 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06003548 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003549 ibmvnic_send_crq(adapter, &crq);
3550
3551 if (adapter->netdev->flags & IFF_PROMISC) {
3552 if (adapter->promisc_supported) {
3553 crq.request_capability.capability =
3554 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003555 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06003556 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003557 ibmvnic_send_crq(adapter, &crq);
3558 }
3559 } else {
3560 crq.request_capability.capability =
3561 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06003562 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06003563 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003564 ibmvnic_send_crq(adapter, &crq);
3565 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06003566}
3567
3568static int pending_scrq(struct ibmvnic_adapter *adapter,
3569 struct ibmvnic_sub_crq_queue *scrq)
3570{
3571 union sub_crq *entry = &scrq->msgs[scrq->cur];
3572
Thomas Falcon1cf9cc72017-06-14 23:50:08 -05003573 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003574 return 1;
3575 else
3576 return 0;
3577}
3578
3579static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3580 struct ibmvnic_sub_crq_queue *scrq)
3581{
3582 union sub_crq *entry;
3583 unsigned long flags;
3584
3585 spin_lock_irqsave(&scrq->lock, flags);
3586 entry = &scrq->msgs[scrq->cur];
3587 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3588 if (++scrq->cur == scrq->size)
3589 scrq->cur = 0;
3590 } else {
3591 entry = NULL;
3592 }
3593 spin_unlock_irqrestore(&scrq->lock, flags);
3594
3595 return entry;
3596}
3597
3598static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3599{
3600 struct ibmvnic_crq_queue *queue = &adapter->crq;
3601 union ibmvnic_crq *crq;
3602
3603 crq = &queue->msgs[queue->cur];
3604 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3605 if (++queue->cur == queue->size)
3606 queue->cur = 0;
3607 } else {
3608 crq = NULL;
3609 }
3610
3611 return crq;
3612}
3613
Thomas Falcon2d14d372018-07-13 12:03:32 -05003614static void print_subcrq_error(struct device *dev, int rc, const char *func)
3615{
3616 switch (rc) {
3617 case H_PARAMETER:
3618 dev_warn_ratelimited(dev,
3619 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3620 func, rc);
3621 break;
3622 case H_CLOSED:
3623 dev_warn_ratelimited(dev,
3624 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3625 func, rc);
3626 break;
3627 default:
3628 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3629 break;
3630 }
3631}
3632
Thomas Falconad7775d2016-04-01 17:20:34 -05003633static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3634 u64 remote_handle, u64 ioba, u64 num_entries)
3635{
3636 unsigned int ua = adapter->vdev->unit_address;
3637 struct device *dev = &adapter->vdev->dev;
3638 int rc;
3639
3640 /* Make sure the hypervisor sees the complete request */
3641 mb();
3642 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3643 cpu_to_be64(remote_handle),
3644 ioba, num_entries);
3645
Thomas Falcon2d14d372018-07-13 12:03:32 -05003646 if (rc)
3647 print_subcrq_error(dev, rc, __func__);
Thomas Falconad7775d2016-04-01 17:20:34 -05003648
3649 return rc;
3650}
3651
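/* Hand a single CRQ entry to the hypervisor via H_SEND_CRQ. Anything
 * other than an INIT command is rejected while the CRQ is inactive, since
 * that indicates a device state change during reset.
 */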
Thomas Falcon032c5e82015-12-21 11:26:06 -06003652static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3653 union ibmvnic_crq *crq)
3654{
3655 unsigned int ua = adapter->vdev->unit_address;
3656 struct device *dev = &adapter->vdev->dev;
3657 u64 *u64_crq = (u64 *)crq;
3658 int rc;
3659
3660 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3661 (unsigned long int)cpu_to_be64(u64_crq[0]),
3662 (unsigned long int)cpu_to_be64(u64_crq[1]));
3663
Thomas Falcon51536982018-05-23 13:37:56 -05003664 if (!adapter->crq.active &&
3665 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3666 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3667 return -EINVAL;
3668 }
3669
Thomas Falcon032c5e82015-12-21 11:26:06 -06003670 /* Make sure the hypervisor sees the complete request */
3671 mb();
3672
3673 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3674 cpu_to_be64(u64_crq[0]),
3675 cpu_to_be64(u64_crq[1]));
3676
3677 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003678 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003679 dev_warn(dev, "CRQ Queue closed\n");
Lijun Panfa68bfa2020-08-19 17:52:24 -05003680			/* do not reset; report the failure and wait for passive init from the server */
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003681 }
3682
Thomas Falcon032c5e82015-12-21 11:26:06 -06003683 dev_warn(dev, "Send error (rc=%d)\n", rc);
3684 }
3685
3686 return rc;
3687}
3688
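/* Send the CRQ INIT command, retrying up to 100 times with a 50ms delay
 * while the partner side still reports H_CLOSED.
 */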
3689static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3690{
Thomas Falcon36a782f2020-08-31 11:59:57 -05003691 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003692 union ibmvnic_crq crq;
Thomas Falcon36a782f2020-08-31 11:59:57 -05003693 int retries = 100;
3694 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003695
3696 memset(&crq, 0, sizeof(crq));
3697 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3698 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3699 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3700
Thomas Falcon36a782f2020-08-31 11:59:57 -05003701 do {
3702 rc = ibmvnic_send_crq(adapter, &crq);
3703 if (rc != H_CLOSED)
3704 break;
3705 retries--;
3706 msleep(50);
3707
3708 } while (retries > 0);
3709
3710 if (rc) {
3711 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3712 return rc;
3713 }
3714
3715 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003716}
3717
Thomas Falcon032c5e82015-12-21 11:26:06 -06003718static int send_version_xchg(struct ibmvnic_adapter *adapter)
3719{
3720 union ibmvnic_crq crq;
3721
3722 memset(&crq, 0, sizeof(crq));
3723 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3724 crq.version_exchange.cmd = VERSION_EXCHANGE;
3725 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3726
3727 return ibmvnic_send_crq(adapter, &crq);
3728}
3729
Nathan Fontenot37798d02017-11-08 11:23:56 -06003730struct vnic_login_client_data {
3731 u8 type;
3732 __be16 len;
Kees Cook08ea5562018-04-10 15:26:43 -07003733 char name[];
Nathan Fontenot37798d02017-11-08 11:23:56 -06003734} __packed;
3735
3736static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3737{
3738 int len;
3739
3740 /* Calculate the amount of buffer space needed for the
3741 * vnic client data in the login buffer. There are four entries,
3742	 * vnic client data in the login buffer. There are four entries:
3743 */
3744 len = 4 * sizeof(struct vnic_login_client_data);
3745 len += 6; /* "Linux" plus NULL */
3746 len += strlen(utsname()->nodename) + 1;
3747 len += strlen(adapter->netdev->name) + 1;
3748
3749 return len;
3750}
3751
3752static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3753 struct vnic_login_client_data *vlcd)
3754{
3755 const char *os_name = "Linux";
3756 int len;
3757
3758 /* Type 1 - LPAR OS */
3759 vlcd->type = 1;
3760 len = strlen(os_name) + 1;
3761 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003762 strncpy(vlcd->name, os_name, len);
3763 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003764
3765 /* Type 2 - LPAR name */
3766 vlcd->type = 2;
3767 len = strlen(utsname()->nodename) + 1;
3768 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003769 strncpy(vlcd->name, utsname()->nodename, len);
3770 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003771
3772 /* Type 3 - device name */
3773 vlcd->type = 3;
3774 len = strlen(adapter->netdev->name) + 1;
3775 vlcd->len = cpu_to_be16(len);
Kees Cook08ea5562018-04-10 15:26:43 -07003776 strncpy(vlcd->name, adapter->netdev->name, len);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003777}
3778
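/* Build and send the LOGIN request: allocate and DMA map the login
 * buffer (including the sub-CRQ handle lists and client data) and the
 * response buffer, then issue the LOGIN CRQ to the server.
 */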
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003779static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003780{
3781 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3782 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003783 struct device *dev = &adapter->vdev->dev;
3784 dma_addr_t rsp_buffer_token;
3785 dma_addr_t buffer_token;
3786 size_t rsp_buffer_size;
3787 union ibmvnic_crq crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003788 size_t buffer_size;
3789 __be64 *tx_list_p;
3790 __be64 *rx_list_p;
Nathan Fontenot37798d02017-11-08 11:23:56 -06003791 int client_data_len;
3792 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003793 int i;
3794
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003795 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3796 netdev_err(adapter->netdev,
3797 "RX or TX queues are not allocated, device login failed\n");
3798 return -1;
3799 }
3800
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06003801 release_login_rsp_buffer(adapter);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003802 client_data_len = vnic_client_data_len(adapter);
3803
Thomas Falcon032c5e82015-12-21 11:26:06 -06003804 buffer_size =
3805 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06003806 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3807 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003808
Nathan Fontenot37798d02017-11-08 11:23:56 -06003809 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003810 if (!login_buffer)
3811 goto buf_alloc_failed;
3812
3813 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3814 DMA_TO_DEVICE);
3815 if (dma_mapping_error(dev, buffer_token)) {
3816 dev_err(dev, "Couldn't map login buffer\n");
3817 goto buf_map_failed;
3818 }
3819
John Allen498cd8e2016-04-06 11:49:55 -05003820 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3821 sizeof(u64) * adapter->req_tx_queues +
3822 sizeof(u64) * adapter->req_rx_queues +
3823 sizeof(u64) * adapter->req_rx_queues +
3824 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003825
3826 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3827 if (!login_rsp_buffer)
3828 goto buf_rsp_alloc_failed;
3829
3830 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3831 rsp_buffer_size, DMA_FROM_DEVICE);
3832 if (dma_mapping_error(dev, rsp_buffer_token)) {
3833 dev_err(dev, "Couldn't map login rsp buffer\n");
3834 goto buf_rsp_map_failed;
3835 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04003836
Thomas Falcon032c5e82015-12-21 11:26:06 -06003837 adapter->login_buf = login_buffer;
3838 adapter->login_buf_token = buffer_token;
3839 adapter->login_buf_sz = buffer_size;
3840 adapter->login_rsp_buf = login_rsp_buffer;
3841 adapter->login_rsp_buf_token = rsp_buffer_token;
3842 adapter->login_rsp_buf_sz = rsp_buffer_size;
3843
3844 login_buffer->len = cpu_to_be32(buffer_size);
3845 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3846 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3847 login_buffer->off_txcomp_subcrqs =
3848 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3849 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3850 login_buffer->off_rxcomp_subcrqs =
3851 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3852 sizeof(u64) * adapter->req_tx_queues);
3853 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3854 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3855
3856 tx_list_p = (__be64 *)((char *)login_buffer +
3857 sizeof(struct ibmvnic_login_buffer));
3858 rx_list_p = (__be64 *)((char *)login_buffer +
3859 sizeof(struct ibmvnic_login_buffer) +
3860 sizeof(u64) * adapter->req_tx_queues);
3861
3862 for (i = 0; i < adapter->req_tx_queues; i++) {
3863 if (adapter->tx_scrq[i]) {
3864 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3865 crq_num);
3866 }
3867 }
3868
3869 for (i = 0; i < adapter->req_rx_queues; i++) {
3870 if (adapter->rx_scrq[i]) {
3871 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3872 crq_num);
3873 }
3874 }
3875
Nathan Fontenot37798d02017-11-08 11:23:56 -06003876 /* Insert vNIC login client data */
3877 vlcd = (struct vnic_login_client_data *)
3878 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3879 login_buffer->client_data_offset =
3880 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3881 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3882
3883 vnic_add_client_data(adapter, vlcd);
3884
Thomas Falcon032c5e82015-12-21 11:26:06 -06003885 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3886 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3887 netdev_dbg(adapter->netdev, "%016lx\n",
3888 ((unsigned long int *)(adapter->login_buf))[i]);
3889 }
3890
3891 memset(&crq, 0, sizeof(crq));
3892 crq.login.first = IBMVNIC_CRQ_CMD;
3893 crq.login.cmd = LOGIN;
3894 crq.login.ioba = cpu_to_be32(buffer_token);
3895 crq.login.len = cpu_to_be32(buffer_size);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003896 ibmvnic_send_crq(adapter, &crq);
3897
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003898 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003899
Thomas Falcon032c5e82015-12-21 11:26:06 -06003900buf_rsp_map_failed:
3901 kfree(login_rsp_buffer);
3902buf_rsp_alloc_failed:
3903 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3904buf_map_failed:
3905 kfree(login_buffer);
3906buf_alloc_failed:
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003907 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003908}
3909
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003910static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3911 u32 len, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003912{
3913 union ibmvnic_crq crq;
3914
3915 memset(&crq, 0, sizeof(crq));
3916 crq.request_map.first = IBMVNIC_CRQ_CMD;
3917 crq.request_map.cmd = REQUEST_MAP;
3918 crq.request_map.map_id = map_id;
3919 crq.request_map.ioba = cpu_to_be32(addr);
3920 crq.request_map.len = cpu_to_be32(len);
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003921 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003922}
3923
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003924static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003925{
3926 union ibmvnic_crq crq;
3927
3928 memset(&crq, 0, sizeof(crq));
3929 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3930 crq.request_unmap.cmd = REQUEST_UNMAP;
3931 crq.request_unmap.map_id = map_id;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05003932 return ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003933}
3934
Lijun Pan69980d02020-09-27 20:13:28 -05003935static void send_query_map(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003936{
3937 union ibmvnic_crq crq;
3938
3939 memset(&crq, 0, sizeof(crq));
3940 crq.query_map.first = IBMVNIC_CRQ_CMD;
3941 crq.query_map.cmd = QUERY_MAP;
3942 ibmvnic_send_crq(adapter, &crq);
3943}
3944
3945/* Send a series of CRQs requesting various capabilities of the VNIC server */
Lijun Pan491099a2020-09-27 20:13:26 -05003946static void send_query_cap(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003947{
3948 union ibmvnic_crq crq;
3949
Thomas Falcon901e0402017-02-15 12:17:59 -06003950 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003951 memset(&crq, 0, sizeof(crq));
3952 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3953 crq.query_capability.cmd = QUERY_CAPABILITY;
3954
3955 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003956 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003957 ibmvnic_send_crq(adapter, &crq);
3958
3959 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003960 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003961 ibmvnic_send_crq(adapter, &crq);
3962
3963 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003964 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003965 ibmvnic_send_crq(adapter, &crq);
3966
3967 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003968 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003969 ibmvnic_send_crq(adapter, &crq);
3970
3971 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003972 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003973 ibmvnic_send_crq(adapter, &crq);
3974
3975 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003976 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003977 ibmvnic_send_crq(adapter, &crq);
3978
3979 crq.query_capability.capability =
3980 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003981 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003982 ibmvnic_send_crq(adapter, &crq);
3983
3984 crq.query_capability.capability =
3985 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003986 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003987 ibmvnic_send_crq(adapter, &crq);
3988
3989 crq.query_capability.capability =
3990 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003991 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003992 ibmvnic_send_crq(adapter, &crq);
3993
3994 crq.query_capability.capability =
3995 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003996 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003997 ibmvnic_send_crq(adapter, &crq);
3998
3999 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06004000 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004001 ibmvnic_send_crq(adapter, &crq);
4002
4003 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06004004 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004005 ibmvnic_send_crq(adapter, &crq);
4006
4007 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06004008 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004009 ibmvnic_send_crq(adapter, &crq);
4010
4011 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06004012 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004013 ibmvnic_send_crq(adapter, &crq);
4014
4015 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06004016 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004017 ibmvnic_send_crq(adapter, &crq);
4018
4019 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06004020 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004021 ibmvnic_send_crq(adapter, &crq);
4022
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004023 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4024 atomic_inc(&adapter->running_cap_crqs);
4025 ibmvnic_send_crq(adapter, &crq);
4026
Thomas Falcon032c5e82015-12-21 11:26:06 -06004027 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004028 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004029 ibmvnic_send_crq(adapter, &crq);
4030
4031 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06004032 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004033 ibmvnic_send_crq(adapter, &crq);
4034
4035 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004036 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004037 ibmvnic_send_crq(adapter, &crq);
4038
4039 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06004040 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004041 ibmvnic_send_crq(adapter, &crq);
4042
4043 crq.query_capability.capability =
4044 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06004045 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004046 ibmvnic_send_crq(adapter, &crq);
4047
4048 crq.query_capability.capability =
4049 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004050 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004051 ibmvnic_send_crq(adapter, &crq);
4052
4053 crq.query_capability.capability =
4054 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004055 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004056 ibmvnic_send_crq(adapter, &crq);
4057
4058 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06004059 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004060 ibmvnic_send_crq(adapter, &crq);
4061}
4062
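/* Ask the VNIC server for its IP offload capabilities. The reply is
 * written by the server into the DMA-mapped ip_offload_buf and is
 * processed in handle_query_ip_offload_rsp().
 */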
Lijun Pan16e811f2020-09-27 20:13:29 -05004063static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4064{
4065 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4066 struct device *dev = &adapter->vdev->dev;
4067 union ibmvnic_crq crq;
4068
4069 adapter->ip_offload_tok =
4070 dma_map_single(dev,
4071 &adapter->ip_offload_buf,
4072 buf_sz,
4073 DMA_FROM_DEVICE);
4074
4075 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4076 if (!firmware_has_feature(FW_FEATURE_CMO))
4077 dev_err(dev, "Couldn't map offload buffer\n");
4078 return;
4079 }
4080
4081 memset(&crq, 0, sizeof(crq));
4082 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4083 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4084 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4085 crq.query_ip_offload.ioba =
4086 cpu_to_be32(adapter->ip_offload_tok);
4087
4088 ibmvnic_send_crq(adapter, &crq);
4089}
4090
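/* Enable the subset of offloads reported by QUERY_IP_OFFLOAD that the
 * driver supports: build the control buffer from the query results,
 * update the netdev feature flags to match, and send CONTROL_IP_OFFLOAD.
 */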
Lijun Pan46899bd2020-09-27 20:13:30 -05004091static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4092{
4093 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4094 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4095 struct device *dev = &adapter->vdev->dev;
4096 netdev_features_t old_hw_features = 0;
4097 union ibmvnic_crq crq;
4098
4099 adapter->ip_offload_ctrl_tok =
4100 dma_map_single(dev,
4101 ctrl_buf,
4102 sizeof(adapter->ip_offload_ctrl),
4103 DMA_TO_DEVICE);
4104
4105 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4106 dev_err(dev, "Couldn't map ip offload control buffer\n");
4107 return;
4108 }
4109
4110 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4111 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4112 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4113 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4114 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4115 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4116 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4117 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4118 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4119 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4120
4121 /* large_rx disabled for now, additional features needed */
4122 ctrl_buf->large_rx_ipv4 = 0;
4123 ctrl_buf->large_rx_ipv6 = 0;
4124
4125 if (adapter->state != VNIC_PROBING) {
4126 old_hw_features = adapter->netdev->hw_features;
4127 adapter->netdev->hw_features = 0;
4128 }
4129
4130 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4131
4132 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4133 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4134
4135 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4136 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4137
4138 if ((adapter->netdev->features &
4139 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4140 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4141
4142 if (buf->large_tx_ipv4)
4143 adapter->netdev->hw_features |= NETIF_F_TSO;
4144 if (buf->large_tx_ipv6)
4145 adapter->netdev->hw_features |= NETIF_F_TSO6;
4146
4147 if (adapter->state == VNIC_PROBING) {
4148 adapter->netdev->features |= adapter->netdev->hw_features;
4149 } else if (old_hw_features != adapter->netdev->hw_features) {
4150 netdev_features_t tmp = 0;
4151
4152 /* disable features no longer supported */
4153 adapter->netdev->features &= adapter->netdev->hw_features;
4154 /* turn on features now supported if previously enabled */
4155 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4156 adapter->netdev->hw_features;
4157 adapter->netdev->features |=
4158 tmp & adapter->netdev->wanted_features;
4159 }
4160
4161 memset(&crq, 0, sizeof(crq));
4162 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4163 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4164 crq.control_ip_offload.len =
4165 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4166 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4167 ibmvnic_send_crq(adapter, &crq);
4168}
4169
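/* Handle the GET_VPD_SIZE response: record the VPD length reported by
 * the server (used to size the buffer for the follow-up GET_VPD
 * request) and complete fw_done for the waiting caller.
 */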
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004170static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4171 struct ibmvnic_adapter *adapter)
4172{
4173 struct device *dev = &adapter->vdev->dev;
4174
4175 if (crq->get_vpd_size_rsp.rc.code) {
4176 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4177 crq->get_vpd_size_rsp.rc.code);
4178 complete(&adapter->fw_done);
4179 return;
4180 }
4181
4182 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4183 complete(&adapter->fw_done);
4184}
4185
4186static void handle_vpd_rsp(union ibmvnic_crq *crq,
4187 struct ibmvnic_adapter *adapter)
4188{
4189 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004190 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004191 u8 fw_level_len = 0;
4192
4193 memset(adapter->fw_version, 0, 32);
4194
4195 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4196 DMA_FROM_DEVICE);
4197
4198 if (crq->get_vpd_rsp.rc.code) {
4199 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4200 crq->get_vpd_rsp.rc.code);
4201 goto complete;
4202 }
4203
4204 /* get the position of the firmware version info
4205 * located after the ASCII 'RM' substring in the buffer
4206 */
4207 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4208 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02004209 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004210 goto complete;
4211 }
4212
4213 /* get length of firmware level ASCII substring */
4214 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4215 fw_level_len = *(substr + 2);
4216 } else {
4217		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
4218 goto complete;
4219 }
4220
4221 /* copy firmware version string from vpd into adapter */
4222 if ((substr + 3 + fw_level_len) <
4223 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004224 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004225 } else {
4226 dev_info(dev, "FW substr extrapolated VPD buff\n");
4227 }
4228
4229complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02004230 if (adapter->fw_version[0] == '\0')
4231 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004232 complete(&adapter->fw_done);
4233}
4234
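/* Handle the QUERY_IP_OFFLOAD response: unmap the query buffer, log the
 * capabilities reported by the server, then send CONTROL_IP_OFFLOAD to
 * enable the offloads the driver will actually use.
 */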
Thomas Falcon032c5e82015-12-21 11:26:06 -06004235static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4236{
4237 struct device *dev = &adapter->vdev->dev;
4238 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004239 int i;
4240
4241 dma_unmap_single(dev, adapter->ip_offload_tok,
4242 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4243
4244 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4245 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4246 netdev_dbg(adapter->netdev, "%016lx\n",
4247 ((unsigned long int *)(buf))[i]);
4248
4249 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4250 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4251 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4252 buf->tcp_ipv4_chksum);
4253 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4254 buf->tcp_ipv6_chksum);
4255 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4256 buf->udp_ipv4_chksum);
4257 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4258 buf->udp_ipv6_chksum);
4259 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4260 buf->large_tx_ipv4);
4261 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4262 buf->large_tx_ipv6);
4263 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4264 buf->large_rx_ipv4);
4265 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4266 buf->large_rx_ipv6);
4267 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4268 buf->max_ipv4_header_size);
4269 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4270 buf->max_ipv6_header_size);
4271 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4272 buf->max_tcp_header_size);
4273 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4274 buf->max_udp_header_size);
4275 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4276 buf->max_large_tx_size);
4277 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4278 buf->max_large_rx_size);
4279 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4280 buf->ipv6_extension_header);
4281 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4282 buf->tcp_pseudosum_req);
4283 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4284 buf->num_ipv6_ext_headers);
4285 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4286 buf->off_ipv6_ext_headers);
4287
Lijun Pan46899bd2020-09-27 20:13:30 -05004288 send_control_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004289}
4290
Thomas Falconc9008d32018-08-06 21:39:59 -05004291static const char *ibmvnic_fw_err_cause(u16 cause)
4292{
4293 switch (cause) {
4294 case ADAPTER_PROBLEM:
4295 return "adapter problem";
4296 case BUS_PROBLEM:
4297 return "bus problem";
4298 case FW_PROBLEM:
4299 return "firmware problem";
4300 case DD_PROBLEM:
4301 return "device driver problem";
4302 case EEH_RECOVERY:
4303 return "EEH recovery";
4304 case FW_UPDATED:
4305 return "firmware updated";
4306 case LOW_MEMORY:
4307		return "low memory";
4308 default:
4309 return "unknown";
4310 }
4311}
4312
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004313static void handle_error_indication(union ibmvnic_crq *crq,
4314 struct ibmvnic_adapter *adapter)
4315{
4316 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004317 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004318
Thomas Falconc9008d32018-08-06 21:39:59 -05004319 cause = be16_to_cpu(crq->error_indication.error_cause);
4320
4321 dev_warn_ratelimited(dev,
4322 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4323 crq->error_indication.flags
4324 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4325 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004326
Nathan Fontenoted651a12017-05-03 14:04:38 -04004327 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4328 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004329 else
4330 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004331}
4332
Thomas Falconf8136142018-01-29 13:45:05 -06004333static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4334 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004335{
4336 struct net_device *netdev = adapter->netdev;
4337 struct device *dev = &adapter->vdev->dev;
4338 long rc;
4339
4340 rc = crq->change_mac_addr_rsp.rc.code;
4341 if (rc) {
4342 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004343 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004344 }
Lijun Pand9b0e592020-10-20 17:39:19 -05004345 /* crq->change_mac_addr.mac_addr is the requested one
4346 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
4347 */
Thomas Falcon62740e92019-05-09 23:13:43 -05004348 ether_addr_copy(netdev->dev_addr,
4349 &crq->change_mac_addr_rsp.mac_addr[0]);
Lijun Pand9b0e592020-10-20 17:39:19 -05004350 ether_addr_copy(adapter->mac_addr,
4351 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004352out:
4353 complete(&adapter->fw_done);
4354 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004355}
4356
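/* Handle a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * suggests a smaller value, which is adopted (or, for the MTU, reverted
 * to the fallback) before the capability requests are resent. Once all
 * outstanding capability CRQs complete, IP offload support is queried.
 */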
4357static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4358 struct ibmvnic_adapter *adapter)
4359{
4360 struct device *dev = &adapter->vdev->dev;
4361 u64 *req_value;
4362 char *name;
4363
Thomas Falcon901e0402017-02-15 12:17:59 -06004364 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004365 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4366 case REQ_TX_QUEUES:
4367 req_value = &adapter->req_tx_queues;
4368 name = "tx";
4369 break;
4370 case REQ_RX_QUEUES:
4371 req_value = &adapter->req_rx_queues;
4372 name = "rx";
4373 break;
4374 case REQ_RX_ADD_QUEUES:
4375 req_value = &adapter->req_rx_add_queues;
4376 name = "rx_add";
4377 break;
4378 case REQ_TX_ENTRIES_PER_SUBCRQ:
4379 req_value = &adapter->req_tx_entries_per_subcrq;
4380 name = "tx_entries_per_subcrq";
4381 break;
4382 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4383 req_value = &adapter->req_rx_add_entries_per_subcrq;
4384 name = "rx_add_entries_per_subcrq";
4385 break;
4386 case REQ_MTU:
4387 req_value = &adapter->req_mtu;
4388 name = "mtu";
4389 break;
4390 case PROMISC_REQUESTED:
4391 req_value = &adapter->promisc;
4392 name = "promisc";
4393 break;
4394 default:
4395 dev_err(dev, "Got invalid cap request rsp %d\n",
4396 crq->request_capability.capability);
4397 return;
4398 }
4399
4400 switch (crq->request_capability_rsp.rc.code) {
4401 case SUCCESS:
4402 break;
4403 case PARTIALSUCCESS:
4404 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4405 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06004406 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06004407 number), name);
John Allene7913802018-01-18 16:27:12 -06004408
4409 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4410 REQ_MTU) {
4411 pr_err("mtu of %llu is not supported. Reverting.\n",
4412 *req_value);
4413 *req_value = adapter->fallback.mtu;
4414 } else {
4415 *req_value =
4416 be64_to_cpu(crq->request_capability_rsp.number);
4417 }
4418
Lijun Pan09081b92020-09-27 20:13:27 -05004419 send_request_cap(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004420 return;
4421 default:
4422 dev_err(dev, "Error %d in request cap rsp\n",
4423 crq->request_capability_rsp.rc.code);
4424 return;
4425 }
4426
4427 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004428 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon249168a2017-02-15 12:18:00 -06004429 adapter->wait_capability = false;
Lijun Pan16e811f2020-09-27 20:13:29 -05004430 send_query_ip_offload(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004431 }
4432}
4433
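/* Handle the login response: sanity-check it against the login request,
 * then record the RX buffer size and the TX/RX sub-CRQ handles returned
 * by the server before completing init_done.
 */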
4434static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4435 struct ibmvnic_adapter *adapter)
4436{
4437 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004438 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004439 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4440 struct ibmvnic_login_buffer *login = adapter->login_buf;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004441 u64 *tx_handle_array;
4442 u64 *rx_handle_array;
4443 int num_tx_pools;
4444 int num_rx_pools;
Thomas Falcon507ebe62020-08-21 13:39:01 -05004445 u64 *size_array;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004446 int i;
4447
4448 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004449 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004450 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004451 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004452
John Allen498cd8e2016-04-06 11:49:55 -05004453 /* If the number of queues requested can't be allocated by the
4454 * server, the login response will return with code 1. We will need
4455 * to resend the login buffer with fewer queues requested.
4456 */
4457 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004458 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004459 complete(&adapter->init_done);
4460 return 0;
4461 }
4462
John Allenc26eba02017-10-26 16:23:25 -05004463 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4464
Thomas Falcon032c5e82015-12-21 11:26:06 -06004465 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4466 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4467 netdev_dbg(adapter->netdev, "%016lx\n",
4468 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4469 }
4470
4471 /* Sanity checks */
4472 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4473 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4474 adapter->req_rx_add_queues !=
4475 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4476 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4477 ibmvnic_remove(adapter->vdev);
4478 return -EIO;
4479 }
Thomas Falcon507ebe62020-08-21 13:39:01 -05004480 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4481 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
4482 /* variable buffer sizes are not supported, so just read the
4483 * first entry.
4484 */
4485 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004486
4487 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4488 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4489
4490 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4491 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4492 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4493 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4494
4495 for (i = 0; i < num_tx_pools; i++)
4496 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4497
4498 for (i = 0; i < num_rx_pools; i++)
4499 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4500
Thomas Falcon507ebe62020-08-21 13:39:01 -05004501 adapter->num_active_tx_scrqs = num_tx_pools;
4502 adapter->num_active_rx_scrqs = num_rx_pools;
Cristobal Fornof3ae59c2020-08-19 13:16:23 -05004503 release_login_rsp_buffer(adapter);
Thomas Falcona2c0f032018-02-21 18:18:30 -06004504 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004505 complete(&adapter->init_done);
4506
Thomas Falcon032c5e82015-12-21 11:26:06 -06004507 return 0;
4508}
4509
Thomas Falcon032c5e82015-12-21 11:26:06 -06004510static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4511 struct ibmvnic_adapter *adapter)
4512{
4513 struct device *dev = &adapter->vdev->dev;
4514 long rc;
4515
4516 rc = crq->request_unmap_rsp.rc.code;
4517 if (rc)
4518 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4519}
4520
4521static void handle_query_map_rsp(union ibmvnic_crq *crq,
4522 struct ibmvnic_adapter *adapter)
4523{
4524 struct net_device *netdev = adapter->netdev;
4525 struct device *dev = &adapter->vdev->dev;
4526 long rc;
4527
4528 rc = crq->query_map_rsp.rc.code;
4529 if (rc) {
4530 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4531 return;
4532 }
4533 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4534 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4535 crq->query_map_rsp.free_pages);
4536}
4537
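/* Handle a QUERY_CAPABILITY response, caching the reported value in the
 * adapter. When the last outstanding query completes, the negotiated
 * values are requested via send_request_cap().
 */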
4538static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4539 struct ibmvnic_adapter *adapter)
4540{
4541 struct net_device *netdev = adapter->netdev;
4542 struct device *dev = &adapter->vdev->dev;
4543 long rc;
4544
Thomas Falcon901e0402017-02-15 12:17:59 -06004545 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004546 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004547 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004548 rc = crq->query_capability.rc.code;
4549 if (rc) {
4550 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4551 goto out;
4552 }
4553
4554 switch (be16_to_cpu(crq->query_capability.capability)) {
4555 case MIN_TX_QUEUES:
4556 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004557 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004558 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4559 adapter->min_tx_queues);
4560 break;
4561 case MIN_RX_QUEUES:
4562 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004563 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004564 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4565 adapter->min_rx_queues);
4566 break;
4567 case MIN_RX_ADD_QUEUES:
4568 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004569 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004570 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4571 adapter->min_rx_add_queues);
4572 break;
4573 case MAX_TX_QUEUES:
4574 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004575 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004576 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4577 adapter->max_tx_queues);
4578 break;
4579 case MAX_RX_QUEUES:
4580 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004581 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004582 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4583 adapter->max_rx_queues);
4584 break;
4585 case MAX_RX_ADD_QUEUES:
4586 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004587 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004588 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4589 adapter->max_rx_add_queues);
4590 break;
4591 case MIN_TX_ENTRIES_PER_SUBCRQ:
4592 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004593 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004594 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4595 adapter->min_tx_entries_per_subcrq);
4596 break;
4597 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4598 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004599 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004600 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4601 adapter->min_rx_add_entries_per_subcrq);
4602 break;
4603 case MAX_TX_ENTRIES_PER_SUBCRQ:
4604 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004605 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004606 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4607 adapter->max_tx_entries_per_subcrq);
4608 break;
4609 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4610 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004611 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004612 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4613 adapter->max_rx_add_entries_per_subcrq);
4614 break;
4615 case TCP_IP_OFFLOAD:
4616 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06004617 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004618 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4619 adapter->tcp_ip_offload);
4620 break;
4621 case PROMISC_SUPPORTED:
4622 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004623 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004624 netdev_dbg(netdev, "promisc_supported = %lld\n",
4625 adapter->promisc_supported);
4626 break;
4627 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004628 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004629 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004630 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4631 break;
4632 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004633 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004634 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004635 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4636 break;
4637 case MAX_MULTICAST_FILTERS:
4638 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004639 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004640 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4641 adapter->max_multicast_filters);
4642 break;
4643 case VLAN_HEADER_INSERTION:
4644 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004645 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004646 if (adapter->vlan_header_insertion)
4647 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4648 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4649 adapter->vlan_header_insertion);
4650 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004651 case RX_VLAN_HEADER_INSERTION:
4652 adapter->rx_vlan_header_insertion =
4653 be64_to_cpu(crq->query_capability.number);
4654 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4655 adapter->rx_vlan_header_insertion);
4656 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004657 case MAX_TX_SG_ENTRIES:
4658 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004659 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004660 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4661 adapter->max_tx_sg_entries);
4662 break;
4663 case RX_SG_SUPPORTED:
4664 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004665 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004666 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4667 adapter->rx_sg_supported);
4668 break;
4669 case OPT_TX_COMP_SUB_QUEUES:
4670 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004671 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004672 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4673 adapter->opt_tx_comp_sub_queues);
4674 break;
4675 case OPT_RX_COMP_QUEUES:
4676 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004677 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004678 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4679 adapter->opt_rx_comp_queues);
4680 break;
4681 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4682 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004683 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004684 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4685 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4686 break;
4687 case OPT_TX_ENTRIES_PER_SUBCRQ:
4688 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004689 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004690 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4691 adapter->opt_tx_entries_per_subcrq);
4692 break;
4693 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4694 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004695 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004696 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4697 adapter->opt_rxba_entries_per_subcrq);
4698 break;
4699 case TX_RX_DESC_REQ:
4700 adapter->tx_rx_desc_req = crq->query_capability.number;
4701 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4702 adapter->tx_rx_desc_req);
4703 break;
4704
4705 default:
4706 netdev_err(netdev, "Got invalid cap rsp %d\n",
4707 crq->query_capability.capability);
4708 }
4709
4710out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004711 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4712 adapter->wait_capability = false;
Lijun Pan09081b92020-09-27 20:13:27 -05004713 send_request_cap(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004714 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004715}
4716
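/* Synchronously query the physical port parameters (speed and duplex).
 * fw_lock serializes this with other commands that share the fw_done
 * completion.
 */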
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004717static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4718{
4719 union ibmvnic_crq crq;
4720 int rc;
4721
4722 memset(&crq, 0, sizeof(crq));
4723 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4724 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004725
4726 mutex_lock(&adapter->fw_lock);
4727 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06004728 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004729
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004730 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004731 if (rc) {
4732 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004733 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004734 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004735
4736 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004737 if (rc) {
4738 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06004739 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004740 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004741
Thomas Falconff25dcb2019-11-25 17:12:56 -06004742 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004743 return adapter->fw_done_rc ? -EIO : 0;
4744}
4745
4746static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4747 struct ibmvnic_adapter *adapter)
4748{
4749 struct net_device *netdev = adapter->netdev;
4750 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004751 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004752
4753 rc = crq->query_phys_parms_rsp.rc.code;
4754 if (rc) {
4755 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4756 return rc;
4757 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004758 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004759 case IBMVNIC_10MBPS:
4760 adapter->speed = SPEED_10;
4761 break;
4762 case IBMVNIC_100MBPS:
4763 adapter->speed = SPEED_100;
4764 break;
4765 case IBMVNIC_1GBPS:
4766 adapter->speed = SPEED_1000;
4767 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05004768 case IBMVNIC_10GBPS:
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004769 adapter->speed = SPEED_10000;
4770 break;
4771 case IBMVNIC_25GBPS:
4772 adapter->speed = SPEED_25000;
4773 break;
4774 case IBMVNIC_40GBPS:
4775 adapter->speed = SPEED_40000;
4776 break;
4777 case IBMVNIC_50GBPS:
4778 adapter->speed = SPEED_50000;
4779 break;
4780 case IBMVNIC_100GBPS:
4781 adapter->speed = SPEED_100000;
4782 break;
Lijun Panb9cd7952020-09-27 19:06:25 -05004783 case IBMVNIC_200GBPS:
4784 adapter->speed = SPEED_200000;
4785 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004786 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004787 if (netif_carrier_ok(netdev))
4788 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004789 adapter->speed = SPEED_UNKNOWN;
4790 }
4791 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4792 adapter->duplex = DUPLEX_FULL;
4793 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4794 adapter->duplex = DUPLEX_HALF;
4795 else
4796 adapter->duplex = DUPLEX_UNKNOWN;
4797
4798 return rc;
4799}
4800
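/* Dispatch one received CRQ message: transport events (partner init,
 * failover, migration) are handled here directly, while command
 * responses are routed to their individual handlers.
 */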
Thomas Falcon032c5e82015-12-21 11:26:06 -06004801static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4802 struct ibmvnic_adapter *adapter)
4803{
4804 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4805 struct net_device *netdev = adapter->netdev;
4806 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004807 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004808 long rc;
4809
4810 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004811 (unsigned long int)cpu_to_be64(u64_crq[0]),
4812 (unsigned long int)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004813 switch (gen_crq->first) {
4814 case IBMVNIC_CRQ_INIT_RSP:
4815 switch (gen_crq->cmd) {
4816 case IBMVNIC_CRQ_INIT:
4817 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04004818 adapter->from_passive_init = true;
Thomas Falcon17c87052018-05-23 13:37:58 -05004819 if (!completion_done(&adapter->init_done)) {
4820 complete(&adapter->init_done);
4821 adapter->init_done_rc = -EIO;
4822 }
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004823 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004824 break;
4825 case IBMVNIC_CRQ_INIT_COMPLETE:
4826 dev_info(dev, "Partner initialization complete\n");
Thomas Falcon51536982018-05-23 13:37:56 -05004827 adapter->crq.active = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004828 send_version_xchg(adapter);
4829 break;
4830 default:
4831 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4832 }
4833 return;
4834 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04004835 netif_carrier_off(netdev);
Thomas Falcon51536982018-05-23 13:37:56 -05004836 adapter->crq.active = false;
Thomas Falcon2147e3d2019-11-25 17:12:54 -06004837 /* terminate any thread waiting for a response
4838 * from the device
4839 */
4840 if (!completion_done(&adapter->fw_done)) {
4841 adapter->fw_done_rc = -EIO;
4842 complete(&adapter->fw_done);
4843 }
4844 if (!completion_done(&adapter->stats_done))
4845 complete(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04004846 if (test_bit(0, &adapter->resetting))
Thomas Falcon2770a792018-05-23 13:38:02 -05004847 adapter->force_reset_recovery = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004848 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04004849 dev_info(dev, "Migrated, re-enabling adapter\n");
4850 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05004851 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4852 dev_info(dev, "Backing device failover detected\n");
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004853 adapter->failover_pending = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004854 } else {
4855 /* The adapter lost the connection */
4856 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4857 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04004858 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004859 }
4860 return;
4861 case IBMVNIC_CRQ_CMD_RSP:
4862 break;
4863 default:
4864 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4865 gen_crq->first);
4866 return;
4867 }
4868
4869 switch (gen_crq->cmd) {
4870 case VERSION_EXCHANGE_RSP:
4871 rc = crq->version_exchange_rsp.rc.code;
4872 if (rc) {
4873 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4874 break;
4875 }
Thomas Falcon78468892020-05-28 11:19:17 -05004876 ibmvnic_version =
Thomas Falcon032c5e82015-12-21 11:26:06 -06004877 be16_to_cpu(crq->version_exchange_rsp.version);
Thomas Falcon78468892020-05-28 11:19:17 -05004878 dev_info(dev, "Partner protocol version is %d\n",
4879 ibmvnic_version);
Lijun Pan491099a2020-09-27 20:13:26 -05004880 send_query_cap(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004881 break;
4882 case QUERY_CAPABILITY_RSP:
4883 handle_query_cap_rsp(crq, adapter);
4884 break;
4885 case QUERY_MAP_RSP:
4886 handle_query_map_rsp(crq, adapter);
4887 break;
4888 case REQUEST_MAP_RSP:
Thomas Falconf3be0cb2017-06-21 14:53:01 -05004889 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4890 complete(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004891 break;
4892 case REQUEST_UNMAP_RSP:
4893 handle_request_unmap_rsp(crq, adapter);
4894 break;
4895 case REQUEST_CAPABILITY_RSP:
4896 handle_request_cap_rsp(crq, adapter);
4897 break;
4898 case LOGIN_RSP:
4899 netdev_dbg(netdev, "Got Login Response\n");
4900 handle_login_rsp(crq, adapter);
4901 break;
4902 case LOGICAL_LINK_STATE_RSP:
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004903 netdev_dbg(netdev,
4904 "Got Logical Link State Response, state: %d rc: %d\n",
4905 crq->logical_link_state_rsp.link_state,
4906 crq->logical_link_state_rsp.rc.code);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004907 adapter->logical_link_state =
4908 crq->logical_link_state_rsp.link_state;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004909 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4910 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004911 break;
4912 case LINK_STATE_INDICATION:
4913		netdev_dbg(netdev, "Got Link State Indication\n");
4914 adapter->phys_link_state =
4915 crq->link_state_indication.phys_link_state;
4916 adapter->logical_link_state =
4917 crq->link_state_indication.logical_link_state;
Thomas Falcon0655f992019-05-09 23:13:44 -05004918 if (adapter->phys_link_state && adapter->logical_link_state)
4919 netif_carrier_on(netdev);
4920 else
4921 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004922 break;
4923 case CHANGE_MAC_ADDR_RSP:
4924 netdev_dbg(netdev, "Got MAC address change Response\n");
Thomas Falconf8136142018-01-29 13:45:05 -06004925 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004926 break;
4927 case ERROR_INDICATION:
4928 netdev_dbg(netdev, "Got Error Indication\n");
4929 handle_error_indication(crq, adapter);
4930 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004931 case REQUEST_STATISTICS_RSP:
4932 netdev_dbg(netdev, "Got Statistics Response\n");
4933 complete(&adapter->stats_done);
4934 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004935 case QUERY_IP_OFFLOAD_RSP:
4936 netdev_dbg(netdev, "Got Query IP offload Response\n");
4937 handle_query_ip_offload_rsp(adapter);
4938 break;
4939 case MULTICAST_CTRL_RSP:
4940 netdev_dbg(netdev, "Got multicast control Response\n");
4941 break;
4942 case CONTROL_IP_OFFLOAD_RSP:
4943 netdev_dbg(netdev, "Got Control IP offload Response\n");
4944 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4945 sizeof(adapter->ip_offload_ctrl),
4946 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05004947 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004948 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004949 case COLLECT_FW_TRACE_RSP:
4950 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4951 complete(&adapter->fw_done);
4952 break;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004953 case GET_VPD_SIZE_RSP:
4954 handle_vpd_size_rsp(crq, adapter);
4955 break;
4956 case GET_VPD_RSP:
4957 handle_vpd_rsp(crq, adapter);
4958 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004959 case QUERY_PHYS_PARMS_RSP:
4960 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4961 complete(&adapter->fw_done);
4962 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004963 default:
4964 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4965 gen_crq->cmd);
4966 }
4967}
4968
4969static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4970{
4971 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06004972
Thomas Falcon6c267b32017-02-15 12:17:58 -06004973 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004974 return IRQ_HANDLED;
4975}
4976
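/* Tasklet scheduled from the CRQ interrupt: drain all valid messages
 * from the CRQ and handle them while holding the queue lock.
 */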
Allen Paisaa7c3fe2020-09-14 12:59:29 +05304977static void ibmvnic_tasklet(struct tasklet_struct *t)
Thomas Falcon6c267b32017-02-15 12:17:58 -06004978{
Allen Paisaa7c3fe2020-09-14 12:59:29 +05304979 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004980 struct ibmvnic_crq_queue *queue = &adapter->crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004981 union ibmvnic_crq *crq;
4982 unsigned long flags;
4983 bool done = false;
4984
4985 spin_lock_irqsave(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004986 while (!done) {
4987 /* Pull all the valid messages off the CRQ */
4988 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4989 ibmvnic_handle_crq(crq, adapter);
4990 crq->generic.first = 0;
4991 }
Brian Kinged7ecbf2017-04-19 13:44:53 -04004992
4993 /* remain in tasklet until all
4994 * capabilities responses are received
4995 */
4996 if (!adapter->wait_capability)
4997 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004998 }
Thomas Falcon249168a2017-02-15 12:18:00 -06004999	/* if capability CRQs were sent in this tasklet, the following
5000 * tasklet must wait until all responses are received
5001 */
5002 if (atomic_read(&adapter->running_cap_crqs) != 0)
5003 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005004 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005005}
5006
5007static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5008{
5009 struct vio_dev *vdev = adapter->vdev;
5010 int rc;
5011
5012 do {
5013 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5014 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5015
5016 if (rc)
5017 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
5018
5019 return rc;
5020}
5021
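/* Reset the CRQ by freeing and re-registering it with the hypervisor,
 * clearing any stale messages left in the queue page.
 */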
5022static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5023{
5024 struct ibmvnic_crq_queue *crq = &adapter->crq;
5025 struct device *dev = &adapter->vdev->dev;
5026 struct vio_dev *vdev = adapter->vdev;
5027 int rc;
5028
5029 /* Close the CRQ */
5030 do {
5031 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5032 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5033
5034 /* Clean out the queue */
5035 memset(crq->msgs, 0, PAGE_SIZE);
5036 crq->cur = 0;
Thomas Falcon51536982018-05-23 13:37:56 -05005037 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005038
5039 /* And re-open it again */
5040 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5041 crq->msg_token, PAGE_SIZE);
5042
5043 if (rc == H_CLOSED)
5044 /* Adapter is good, but other end is not ready */
5045 dev_warn(dev, "Partner adapter not ready\n");
5046 else if (rc != 0)
5047 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5048
5049 return rc;
5050}
5051
Nathan Fontenotf9928872017-03-30 02:48:54 -04005052static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005053{
5054 struct ibmvnic_crq_queue *crq = &adapter->crq;
5055 struct vio_dev *vdev = adapter->vdev;
5056 long rc;
5057
Nathan Fontenotf9928872017-03-30 02:48:54 -04005058 if (!crq->msgs)
5059 return;
5060
Thomas Falcon032c5e82015-12-21 11:26:06 -06005061 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5062 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005063 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005064 do {
5065 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5066 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5067
5068 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5069 DMA_BIDIRECTIONAL);
5070 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005071 crq->msgs = NULL;
Thomas Falcon51536982018-05-23 13:37:56 -05005072 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005073}
5074
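/* Allocate and register the CRQ page with the hypervisor (resetting it
 * if the resource is still busy), then set up the CRQ tasklet, request
 * the CRQ interrupt and enable VIO interrupts.
 */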
Nathan Fontenotf9928872017-03-30 02:48:54 -04005075static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06005076{
5077 struct ibmvnic_crq_queue *crq = &adapter->crq;
5078 struct device *dev = &adapter->vdev->dev;
5079 struct vio_dev *vdev = adapter->vdev;
5080 int rc, retrc = -ENOMEM;
5081
Nathan Fontenotf9928872017-03-30 02:48:54 -04005082 if (crq->msgs)
5083 return 0;
5084
Thomas Falcon032c5e82015-12-21 11:26:06 -06005085 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5086 /* Should we allocate more than one page? */
5087
5088 if (!crq->msgs)
5089 return -ENOMEM;
5090
5091 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5092 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5093 DMA_BIDIRECTIONAL);
5094 if (dma_mapping_error(dev, crq->msg_token))
5095 goto map_failed;
5096
5097 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5098 crq->msg_token, PAGE_SIZE);
5099
5100 if (rc == H_RESOURCE)
5101 /* maybe kexecing and resource is busy. try a reset */
5102 rc = ibmvnic_reset_crq(adapter);
5103 retrc = rc;
5104
5105 if (rc == H_CLOSED) {
5106 dev_warn(dev, "Partner adapter not ready\n");
5107 } else if (rc) {
5108 dev_warn(dev, "Error %d opening adapter\n", rc);
5109 goto reg_crq_failed;
5110 }
5111
5112 retrc = 0;
5113
Allen Paisaa7c3fe2020-09-14 12:59:29 +05305114 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06005115
Thomas Falcon032c5e82015-12-21 11:26:06 -06005116 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03005117 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5118 adapter->vdev->unit_address);
5119 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005120 if (rc) {
5121 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5122 vdev->irq, rc);
5123 goto req_irq_failed;
5124 }
5125
5126 rc = vio_enable_interrupts(vdev);
5127 if (rc) {
5128 dev_err(dev, "Error %d enabling interrupts\n", rc);
5129 goto req_irq_failed;
5130 }
5131
5132 crq->cur = 0;
5133 spin_lock_init(&crq->lock);
5134
5135 return retrc;
5136
5137req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06005138 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005139 do {
5140 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5141 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5142reg_crq_failed:
5143 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5144map_failed:
5145 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04005146 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005147 return retrc;
5148}
5149
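/* (Re)establish the connection with the VNIC server: send the CRQ init
 * request, wait for the capability negotiation driven by the CRQ
 * handlers to finish, then initialize or reset the sub-CRQs and their
 * interrupts.
 */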
Lijun Pan635e4422020-08-19 17:52:26 -05005150static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
John Allenf6ef6402017-03-17 17:13:42 -05005151{
5152 struct device *dev = &adapter->vdev->dev;
5153 unsigned long timeout = msecs_to_jiffies(30000);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005154 u64 old_num_rx_queues, old_num_tx_queues;
John Allenf6ef6402017-03-17 17:13:42 -05005155 int rc;
5156
John Allen017892c12017-05-26 10:30:19 -04005157 adapter->from_passive_init = false;
5158
Lijun Pan635e4422020-08-19 17:52:26 -05005159 if (reset) {
5160 old_num_rx_queues = adapter->req_rx_queues;
5161 old_num_tx_queues = adapter->req_tx_queues;
5162 reinit_completion(&adapter->init_done);
5163 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005164
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005165 adapter->init_done_rc = 0;
Lijun Panfa68bfa2020-08-19 17:52:24 -05005166 rc = ibmvnic_send_crq_init(adapter);
5167 if (rc) {
5168 dev_err(dev, "Send crq init failed with error %d\n", rc);
5169 return rc;
5170 }
5171
John Allenf6ef6402017-03-17 17:13:42 -05005172 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5173 dev_err(dev, "Initialization sequence timed out\n");
John Allen017892c12017-05-26 10:30:19 -04005174 return -1;
5175 }
5176
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005177 if (adapter->init_done_rc) {
5178 release_crq_queue(adapter);
5179 return adapter->init_done_rc;
5180 }
5181
Lijun Pan785a2b12020-09-17 21:12:46 -05005182 if (adapter->from_passive_init) {
5183 adapter->state = VNIC_OPEN;
5184 adapter->from_passive_init = false;
5185 return -1;
5186 }
5187
Lijun Pan635e4422020-08-19 17:52:26 -05005188 if (reset &&
5189 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05005190 adapter->reset_reason != VNIC_RESET_MOBILITY) {
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005191 if (adapter->req_rx_queues != old_num_rx_queues ||
5192 adapter->req_tx_queues != old_num_tx_queues) {
5193 release_sub_crqs(adapter, 0);
5194 rc = init_sub_crqs(adapter);
5195 } else {
5196 rc = reset_sub_crq_queues(adapter);
5197 }
5198 } else {
Nathan Fontenot57a49432017-05-26 10:31:12 -04005199 rc = init_sub_crqs(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005200 }
5201
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005202 if (rc) {
5203 dev_err(dev, "Initialization of sub crqs failed\n");
5204 release_crq_queue(adapter);
Thomas Falcon5df969c2017-06-28 19:55:54 -05005205 return rc;
5206 }
5207
5208 rc = init_sub_crq_irqs(adapter);
5209 if (rc) {
5210 dev_err(dev, "Failed to initialize sub crq irqs\n");
5211 release_crq_queue(adapter);
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005212 }
5213
5214 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05005215}
5216
Thomas Falcon40c9db82017-06-12 12:35:04 -05005217static struct device_attribute dev_attr_failover;
5218
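/* Probe: allocate the netdev and adapter, initialize locks, work items
 * and completions, bring up the CRQ and negotiate with the server, then
 * register the net device and the failover sysfs attribute.
 */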
Thomas Falcon032c5e82015-12-21 11:26:06 -06005219static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5220{
5221 struct ibmvnic_adapter *adapter;
5222 struct net_device *netdev;
5223 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005224 int rc;
5225
5226 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5227 dev->unit_address);
5228
5229 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5230 VETH_MAC_ADDR, NULL);
5231 if (!mac_addr_p) {
5232 dev_err(&dev->dev,
5233 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5234 __FILE__, __LINE__);
5235 return 0;
5236 }
5237
5238 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
Thomas Falcond45cc3a2017-12-18 12:52:11 -06005239 IBMVNIC_MAX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005240 if (!netdev)
5241 return -ENOMEM;
5242
5243 adapter = netdev_priv(netdev);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005244 adapter->state = VNIC_PROBING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005245 dev_set_drvdata(&dev->dev, netdev);
5246 adapter->vdev = dev;
5247 adapter->netdev = netdev;
5248
5249 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5250 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5251 netdev->irq = dev->irq;
5252 netdev->netdev_ops = &ibmvnic_netdev_ops;
5253 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5254 SET_NETDEV_DEV(netdev, &dev->dev);
5255
5256 spin_lock_init(&adapter->stats_lock);
5257
Nathan Fontenoted651a12017-05-03 14:04:38 -04005258 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005259 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5260 __ibmvnic_delayed_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005261 INIT_LIST_HEAD(&adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06005262 spin_lock_init(&adapter->rwi_lock);
Juliet Kim7d7195a2020-03-10 09:23:58 -05005263 spin_lock_init(&adapter->state_lock);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005264 mutex_init(&adapter->fw_lock);
Thomas Falconbbd669a2019-04-04 18:58:26 -05005265 init_completion(&adapter->init_done);
Thomas Falcon070eca92019-11-25 17:12:53 -06005266 init_completion(&adapter->fw_done);
5267 init_completion(&adapter->reset_done);
5268 init_completion(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005269 clear_bit(0, &adapter->resetting);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005270
	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

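/* Tear down the adapter: refuse removal while a reset is in progress
 * (-EBUSY); otherwise mark the adapter VNIC_REMOVING, flush any queued
 * reset work, unregister the net device and release the CRQ, sub-CRQ and
 * statistics resources.
 */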
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);
	if (adapter->state == VNIC_RESETTING) {
		spin_unlock_irqrestore(&adapter->state_lock, flags);
		return -EBUSY;
	}

	adapter->state = VNIC_REMOVING;
	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

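/* Sysfs "failover" attribute: writing "1" fetches the current session
 * token from the hypervisor (H_VIOCTL/H_GET_SESSION_TOKEN) and then
 * reports a session error (H_SESSION_ERR_DETECTED) against that token,
 * initiating a client-driven failover of the VNIC connection.
 *
 * Illustrative usage from userspace (the unit address below is made up):
 *
 *	echo 1 > /sys/devices/vio/30000003/failover
 */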
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

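/* Estimate the IO entitlement (DMA-mappable memory) this device wants:
 * one page for the CRQ, the aligned statistics buffer, four pages per
 * sub-CRQ, and the long-term-mapped buffers of every active rx pool.
 * Before the netdev exists (i.e. before probe has completed) fall back
 * to the default entitlement.
 */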
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

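/* PM resume handler: if the adapter was open, schedule the CRQ tasklet so
 * any pending CRQ messages are processed.
 */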
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

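/* Bind to vio devices of type "network" with compatible "IBM,vnic", as
 * presented to the partition by the hypervisor.
 */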
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);