// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

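/* Each entry below pairs an ethtool string with the offset of the matching
 * counter inside struct ibmvnic_adapter. A counter is then read through
 * that offset, roughly like this (illustrative sketch only; this exact
 * call does not appear in this file):
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * which dereferences a u64 at (char *)adapter + offsetof(..., stats) +
 * offsetof(struct ibmvnic_statistics, rx_packets).
 */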
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

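/**
 * h_reg_sub_crq - Register a sub-CRQ page with the hypervisor
 * @unit_address: VIO unit address of this adapter
 * @token: DMA address of the queue page
 * @length: length of the queue in bytes
 * @number: returned sub-CRQ number assigned by the hypervisor
 * @irq: returned IRQ number assigned to the queue
 *
 * Thin wrapper around the H_REG_SUB_CRQ hcall; the two hypervisor
 * return registers are copied out to @number and @irq.
 */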
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

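/**
 * alloc_long_term_buff - Allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: size of the buffer in bytes
 *
 * Allocates a coherent DMA buffer, assigns it the adapter's next map id,
 * and asks the VNIC server to map it via send_request_map(). The buffer
 * is freed again if the map request cannot be sent, times out, or is
 * rejected by the server.
 */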
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

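/**
 * reset_long_term_buff - Zero and re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Clears the buffer contents and sends a fresh map request for the same
 * memory. If the VNIC server rejects the request, the buffer is freed
 * and reallocated from scratch as a fallback.
 */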
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

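/**
 * replenish_rx_pool - Post fresh receive buffers to the VNIC server
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * For each free slot in the pool, allocates an skb, points it at a region
 * of the pool's long term buffer, and notifies the server with an rx_add
 * sub-CRQ descriptor. On an hcall failure the slot is returned to the
 * free map; a closed queue or pending failover additionally deactivates
 * the rx pools and turns the carrier off until the reset path recovers
 * the device.
 */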
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
			       off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

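/**
 * reset_rx_pools - Re-initialize the rx pools after an adapter reset
 * @adapter: private device data
 *
 * Walks every rx pool and either resets its long term buffer in place or,
 * if the server reported a new buffer size at login, frees and reallocates
 * it. Free maps, buffer tracking arrays, and counters are returned to
 * their initial state and each pool is marked active.
 */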
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

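/**
 * init_rx_pools - Allocate one rx pool per rx-add sub-CRQ
 * @netdev: net device whose adapter is being initialized
 *
 * Sizes each pool from the login response: the number of entries comes
 * from req_rx_add_entries_per_subcrq and the buffer size from the
 * server-supplied size array. Allocates the free map, buffer tracking
 * array, and backing long term buffer; any failure tears down all pools
 * via release_rx_pools().
 */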
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

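/**
 * init_tx_pools - Allocate a regular and a TSO tx pool per tx sub-CRQ
 * @netdev: net device whose adapter is being initialized
 *
 * Each queue gets two pools: one sized for req_tx_entries_per_subcrq
 * entries of MTU-plus-VLAN-header buffers, and one sized for
 * IBMVNIC_TSO_BUFS entries of IBMVNIC_TSO_BUF_SZ bytes for TSO frames.
 * Any failure releases all tx pools before returning.
 */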
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

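/**
 * ibmvnic_login - Negotiate a login with the VNIC server
 * @netdev: net device to log in
 *
 * Sends a login CRQ and waits for the response. On a partial success the
 * server wants different capabilities, so the sub-CRQs are released and
 * capability negotiation is rerun before retrying, up to
 * IBMVNIC_MAX_QUEUES attempts. On success the MAC address is programmed
 * via __ibmvnic_set_mac().
 */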
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

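/**
 * set_link_state - Set the logical link state on the VNIC server
 * @adapter: private device data
 * @link_state: IBMVNIC_LOGICAL_LNK_UP or IBMVNIC_LOGICAL_LNK_DN
 *
 * Issues a LOGICAL_LINK_STATE CRQ and waits for the server's response.
 * A partial-success response is retried after a one second delay.
 */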
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

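/**
 * ibmvnic_get_vpd - Retrieve Vital Product Data from the VNIC server
 * @adapter: private device data
 *
 * Two-step exchange under fw_lock: a GET_VPD_SIZE CRQ first reports how
 * large the VPD is, then the buffer is (re)allocated, DMA mapped, and
 * filled by a GET_VPD CRQ. Each step waits on the fw_done completion.
 */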
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

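/**
 * __ibmvnic_open - Bring the interface up once resources exist
 * @netdev: net device being opened
 *
 * Replenishes the rx pools, enables napi and the sub-CRQ interrupts,
 * raises the logical link state, and starts the tx queues. When coming
 * out of VNIC_CLOSED, the previously disabled IRQs are re-enabled and
 * napi is rescheduled so rx processing resumes.
 */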
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

Thomas Falconad7775d2016-04-01 17:20:34 -05001342/**
1343 * build_hdr_data - creates L2/L3/L4 header data buffer
1344 * @hdr_field - bitfield determining needed headers
1345 * @skb - socket buffer
1346 * @hdr_len - array of header lengths
1347 * @tot_len - total length of data
1348 *
1349 * Reads hdr_field to determine which headers are needed by firmware.
1350 * Builds a buffer containing these headers. Saves individual header
1351 * lengths and total buffer length to be used to build descriptors.
1352 */
1353static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1354 int *hdr_len, u8 *hdr_data)
1355{
1356 int len = 0;
1357 u8 *hdr;
1358
Thomas Falconda75e3b2018-03-12 11:51:02 -05001359 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1360 hdr_len[0] = sizeof(struct vlan_ethhdr);
1361 else
1362 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001363
1364 if (skb->protocol == htons(ETH_P_IP)) {
1365 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1366 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1367 hdr_len[2] = tcp_hdrlen(skb);
1368 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1369 hdr_len[2] = sizeof(struct udphdr);
1370 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1371 hdr_len[1] = sizeof(struct ipv6hdr);
1372 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1373 hdr_len[2] = tcp_hdrlen(skb);
1374 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1375 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001376 } else if (skb->protocol == htons(ETH_P_ARP)) {
1377 hdr_len[1] = arp_hdr_len(skb->dev);
1378 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001379 }
1380
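	/* hdr_field bits 6, 5 and 4 request the L2, L3 and L4 headers,
	 * respectively
	 */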
1381 memset(hdr_data, 0, 120);
1382 if ((hdr_field >> 6) & 1) {
1383 hdr = skb_mac_header(skb);
1384 memcpy(hdr_data, hdr, hdr_len[0]);
1385 len += hdr_len[0];
1386 }
1387
1388 if ((hdr_field >> 5) & 1) {
1389 hdr = skb_network_header(skb);
1390 memcpy(hdr_data + len, hdr, hdr_len[1]);
1391 len += hdr_len[1];
1392 }
1393
1394 if ((hdr_field >> 4) & 1) {
1395 hdr = skb_transport_header(skb);
1396 memcpy(hdr_data + len, hdr, hdr_len[2]);
1397 len += hdr_len[2];
1398 }
1399 return len;
1400}
1401
1402/**
1403 * create_hdr_descs - create header and header extension descriptors
1404 * @hdr_field: bitfield determining needed headers
1405 * @hdr_data: buffer containing header data
1406 * @len: length of data buffer
1407 * @hdr_len: array of individual header lengths
1408 * @scrq_arr: descriptor array
1409 *
1410 * Creates header and, if needed, header extension descriptors and
1411 * places them in a descriptor array, scrq_arr; returns their count
1412 */
Thomas Falcon2de09682017-10-16 10:02:11 -05001414static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1415 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001416{
1417 union sub_crq hdr_desc;
1418 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001419 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001420 u8 *data, *cur;
1421 int tmp;
1422
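	/* The first descriptor carries up to 24 bytes of header data plus
	 * the individual header lengths; each extension descriptor that
	 * follows carries up to 29 more bytes.
	 */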
1423 while (tmp_len > 0) {
1424 cur = hdr_data + len - tmp_len;
1425
1426 memset(&hdr_desc, 0, sizeof(hdr_desc));
1427 if (cur != hdr_data) {
1428 data = hdr_desc.hdr_ext.data;
1429 tmp = tmp_len > 29 ? 29 : tmp_len;
1430 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1431 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1432 hdr_desc.hdr_ext.len = tmp;
1433 } else {
1434 data = hdr_desc.hdr.data;
1435 tmp = tmp_len > 24 ? 24 : tmp_len;
1436 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1437 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1438 hdr_desc.hdr.len = tmp;
1439 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1440 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1441 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1442 hdr_desc.hdr.flag = hdr_field << 1;
1443 }
1444 memcpy(data, cur, tmp);
1445 tmp_len -= tmp;
1446 *scrq_arr = hdr_desc;
1447 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001448 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001449 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001450
1451 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001452}
1453
1454/**
1455 * build_hdr_descs_arr - build a header descriptor array
1456 * @txbuff: tx buffer containing the socket buffer and header data
1457 * @num_entries: number of descriptors to be sent, updated with the
1458 *	count of header descriptors created
1459 * @hdr_field: bit field determining which headers will be sent
1460 *
1461 * This function will build a TX descriptor array with applicable
1462 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1463 */
1465static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1466 int *num_entries, u8 hdr_field)
1467{
1468 int hdr_len[3] = {0, 0, 0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001469 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001470 u8 *hdr_data = txbuff->hdr_data;
1471
1472 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1473 txbuff->hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001474 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconad7775d2016-04-01 17:20:34 -05001475 txbuff->indir_arr + 1);
1476}
1477
Thomas Falcon1f247a62018-03-12 11:51:04 -05001478static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1479 struct net_device *netdev)
1480{
1481 /* For some backing devices, mishandling of small packets
1482 * can result in a loss of connection or TX stall. Device
1483 * architects recommend that no packet should be smaller
1484 * than the minimum MTU value provided to the driver, so
1485 * pad any packets to that length
1486 */
1487 if (skb->len < netdev->min_mtu)
1488 return skb_put_padto(skb, netdev->min_mtu);
Thomas Falcon7083a452018-03-12 21:05:26 -05001489
1490 return 0;
Thomas Falcon1f247a62018-03-12 11:51:04 -05001491}
1492
YueHaibing94b2bb22018-09-18 14:35:47 +08001493static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001494{
1495 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1496 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -05001497 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001498 struct device *dev = &adapter->vdev->dev;
1499 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001500 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001501 struct ibmvnic_tx_pool *tx_pool;
1502 unsigned int tx_send_failed = 0;
1503 unsigned int tx_map_failed = 0;
1504 unsigned int tx_dropped = 0;
1505 unsigned int tx_packets = 0;
1506 unsigned int tx_bytes = 0;
1507 dma_addr_t data_dma_addr;
1508 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001509 unsigned long lpar_rc;
1510 union sub_crq tx_crq;
1511 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001512 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001513 unsigned char *dst;
1514 u64 *handle_array;
1515 int index = 0;
Thomas Falcona0dca102018-01-18 19:29:48 -06001516 u8 proto = 0;
YueHaibing94b2bb22018-09-18 14:35:47 +08001517 netdev_tx_t ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001518
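	/* Transmits cannot be processed while a reset is pending; drop the
	 * skb and stop the subqueue until the reset completes.
	 */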
Juliet Kim7ed5b312019-09-20 16:11:23 -04001519 if (test_bit(0, &adapter->resetting)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001520 if (!netif_subqueue_stopped(netdev, skb))
1521 netif_stop_subqueue(netdev, queue_num);
1522 dev_kfree_skb_any(skb);
1523
Thomas Falcon032c5e82015-12-21 11:26:06 -06001524 tx_send_failed++;
1525 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001526 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001527 goto out;
1528 }
1529
Thomas Falcon7083a452018-03-12 21:05:26 -05001530 if (ibmvnic_xmit_workarounds(skb, netdev)) {
Thomas Falcon1f247a62018-03-12 11:51:04 -05001531 tx_dropped++;
1532 tx_send_failed++;
1533 ret = NETDEV_TX_OK;
1534 goto out;
1535 }
Thomas Falcon06b3e352018-03-16 20:00:28 -05001536 if (skb_is_gso(skb))
1537 tx_pool = &adapter->tso_pool[queue_num];
1538 else
1539 tx_pool = &adapter->tx_pool[queue_num];
Thomas Falcon1f247a62018-03-12 11:51:04 -05001540
Nathan Fontenot161b8a82017-05-03 14:05:08 -04001541 tx_scrq = adapter->tx_scrq[queue_num];
1542 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1543 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1544 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1545
Thomas Falcon032c5e82015-12-21 11:26:06 -06001546 index = tx_pool->free_map[tx_pool->consumer_index];
Thomas Falconfdb06102017-10-17 12:36:55 -05001547
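	/* an invalid entry in the free map means no transmit buffer is
	 * available in the pool
	 */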
Thomas Falcon86b61a52018-03-16 20:00:29 -05001548 if (index == IBMVNIC_INVALID_MAP) {
1549 dev_kfree_skb_any(skb);
1550 tx_send_failed++;
1551 tx_dropped++;
1552 ret = NETDEV_TX_OK;
1553 goto out;
1554 }
1555
1556 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1557
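	/* copy the skb into its slot in the queue's long term mapped buffer */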
Thomas Falcon06b3e352018-03-16 20:00:28 -05001558 offset = index * tx_pool->buf_size;
1559 dst = tx_pool->long_term_buff.buff + offset;
1560 memset(dst, 0, tx_pool->buf_size);
1561 data_dma_addr = tx_pool->long_term_buff.addr + offset;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001562
Thomas Falcon15482052017-10-17 12:36:54 -05001563 if (skb_shinfo(skb)->nr_frags) {
1564 int cur, i;
1565
1566 /* Copy the head */
1567 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1568 cur = skb_headlen(skb);
1569
1570 /* Copy the frags */
1571 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1572 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1573
1574 memcpy(dst + cur,
1575 page_address(skb_frag_page(frag)) +
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07001576 skb_frag_off(frag), skb_frag_size(frag));
Thomas Falcon15482052017-10-17 12:36:54 -05001577 cur += skb_frag_size(frag);
1578 }
1579 } else {
1580 skb_copy_from_linear_data(skb, dst, skb->len);
1581 }
1582
Thomas Falcon032c5e82015-12-21 11:26:06 -06001583 tx_pool->consumer_index =
Thomas Falcon06b3e352018-03-16 20:00:28 -05001584 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001585
1586 tx_buff = &tx_pool->tx_buff[index];
1587 tx_buff->skb = skb;
1588 tx_buff->data_dma[0] = data_dma_addr;
1589 tx_buff->data_len[0] = skb->len;
1590 tx_buff->index = index;
1591 tx_buff->pool_index = queue_num;
1592 tx_buff->last_frag = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001593
1594 memset(&tx_crq, 0, sizeof(tx_crq));
1595 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1596 tx_crq.v1.type = IBMVNIC_TX_DESC;
1597 tx_crq.v1.n_crq_elem = 1;
1598 tx_crq.v1.n_sge = 1;
1599 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
Thomas Falcon06b3e352018-03-16 20:00:28 -05001600
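	/* A pool mask in the correlator lets the completion handler tell
	 * TSO buffers apart from regular transmit buffers.
	 */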
Thomas Falconfdb06102017-10-17 12:36:55 -05001601 if (skb_is_gso(skb))
Thomas Falcon06b3e352018-03-16 20:00:28 -05001602 tx_crq.v1.correlator =
1603 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
Thomas Falconfdb06102017-10-17 12:36:55 -05001604 else
Thomas Falcon06b3e352018-03-16 20:00:28 -05001605 tx_crq.v1.correlator = cpu_to_be32(index);
1606 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001607 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1608 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1609
Michał Mirosławe84b4792018-11-07 17:50:52 +01001610 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001611 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1612 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1613 }
1614
1615 if (skb->protocol == htons(ETH_P_IP)) {
Thomas Falcona0dca102018-01-18 19:29:48 -06001616 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1617 proto = ip_hdr(skb)->protocol;
1618 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1619 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1620 proto = ipv6_hdr(skb)->nexthdr;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001621 }
1622
Thomas Falcona0dca102018-01-18 19:29:48 -06001623 if (proto == IPPROTO_TCP)
1624 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1625 else if (proto == IPPROTO_UDP)
1626 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1627
Thomas Falconad7775d2016-04-01 17:20:34 -05001628 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001629 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05001630 hdrs += 2;
1631 }
Thomas Falconfdb06102017-10-17 12:36:55 -05001632 if (skb_is_gso(skb)) {
1633 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1634 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1635 hdrs += 2;
1636 }
Thomas Falconad7775d2016-04-01 17:20:34 -05001637 /* determine if l2/3/4 headers are sent to firmware */
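	/* If so, the header descriptors are appended to the TX descriptor
	 * and the whole array is posted with send_subcrq_indirect();
	 * otherwise the single descriptor is posted with send_subcrq().
	 */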
John Allen2fa56a42018-02-09 13:19:46 -06001638 if ((*hdrs >> 7) & 1) {
Thomas Falconad7775d2016-04-01 17:20:34 -05001639 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1640 tx_crq.v1.n_crq_elem = num_entries;
Thomas Falconecba6162018-02-26 18:10:55 -06001641 tx_buff->num_entries = num_entries;
Thomas Falconad7775d2016-04-01 17:20:34 -05001642 tx_buff->indir_arr[0] = tx_crq;
1643 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1644 sizeof(tx_buff->indir_arr),
1645 DMA_TO_DEVICE);
1646 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001647 dev_kfree_skb_any(skb);
1648 tx_buff->skb = NULL;
Thomas Falconad7775d2016-04-01 17:20:34 -05001649 if (!firmware_has_feature(FW_FEATURE_CMO))
1650 dev_err(dev, "tx: unable to map descriptor array\n");
1651 tx_map_failed++;
1652 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001653 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001654 goto tx_err_out;
Thomas Falconad7775d2016-04-01 17:20:34 -05001655 }
John Allen498cd8e2016-04-06 11:49:55 -05001656 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
Thomas Falconad7775d2016-04-01 17:20:34 -05001657 (u64)tx_buff->indir_dma,
1658 (u64)num_entries);
Thomas Falcon80f0fe02019-08-14 14:57:05 -05001659 dma_unmap_single(dev, tx_buff->indir_dma,
1660 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
Thomas Falconad7775d2016-04-01 17:20:34 -05001661 } else {
Thomas Falconecba6162018-02-26 18:10:55 -06001662 tx_buff->num_entries = num_entries;
John Allen498cd8e2016-04-06 11:49:55 -05001663 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1664 &tx_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -05001665 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001666 if (lpar_rc != H_SUCCESS) {
Thomas Falcon2d14d372018-07-13 12:03:32 -05001667 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1668 dev_err_ratelimited(dev, "tx: send failed\n");
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001669 dev_kfree_skb_any(skb);
1670 tx_buff->skb = NULL;
1671
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001672 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1673 /* Disable TX and report carrier off if queue is closed
1674 * or pending failover.
Thomas Falconb8c80b82017-05-26 10:30:42 -04001675 * Firmware guarantees that a signal will be sent to the
1676 * driver, triggering a reset or some other action.
1677 */
1678 netif_tx_stop_all_queues(netdev);
1679 netif_carrier_off(netdev);
1680 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001681
Thomas Falcon032c5e82015-12-21 11:26:06 -06001682 tx_send_failed++;
1683 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001684 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001685 goto tx_err_out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001686 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001687
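	/* Stop the subqueue once the number of outstanding descriptors
	 * reaches the queue capacity; it is woken again as completions
	 * drain the queue.
	 */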
Thomas Falconffc385b2018-02-18 10:08:41 -06001688 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04001689 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06001690 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001691 netif_stop_subqueue(netdev, queue_num);
1692 }
1693
Thomas Falcon032c5e82015-12-21 11:26:06 -06001694 tx_packets++;
1695 tx_bytes += skb->len;
1696 txq->trans_start = jiffies;
1697 ret = NETDEV_TX_OK;
Thomas Falcon86b61a52018-03-16 20:00:29 -05001698 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001699
Thomas Falcon86b61a52018-03-16 20:00:29 -05001700tx_err_out:
1701	/* roll back consumer index and map array */
1702 if (tx_pool->consumer_index == 0)
1703 tx_pool->consumer_index =
1704 tx_pool->num_buffers - 1;
1705 else
1706 tx_pool->consumer_index--;
1707 tx_pool->free_map[tx_pool->consumer_index] = index;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001708out:
1709 netdev->stats.tx_dropped += tx_dropped;
1710 netdev->stats.tx_bytes += tx_bytes;
1711 netdev->stats.tx_packets += tx_packets;
1712 adapter->tx_send_failed += tx_send_failed;
1713 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05001714 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1715 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1716 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001717
1718 return ret;
1719}
1720
1721static void ibmvnic_set_multi(struct net_device *netdev)
1722{
1723 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1724 struct netdev_hw_addr *ha;
1725 union ibmvnic_crq crq;
1726
1727 memset(&crq, 0, sizeof(crq));
1728 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1729 crq.request_capability.cmd = REQUEST_CAPABILITY;
1730
1731 if (netdev->flags & IFF_PROMISC) {
1732 if (!adapter->promisc_supported)
1733 return;
1734 } else {
1735 if (netdev->flags & IFF_ALLMULTI) {
1736 /* Accept all multicast */
1737 memset(&crq, 0, sizeof(crq));
1738 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1739 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1740 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1741 ibmvnic_send_crq(adapter, &crq);
1742 } else if (netdev_mc_empty(netdev)) {
1743 /* Reject all multicast */
1744 memset(&crq, 0, sizeof(crq));
1745 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1746 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1747 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1748 ibmvnic_send_crq(adapter, &crq);
1749 } else {
1750 /* Accept one or more multicast(s) */
1751 netdev_for_each_mc_addr(ha, netdev) {
1752 memset(&crq, 0, sizeof(crq));
1753 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1754 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1755 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1756 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1757 ha->addr);
1758 ibmvnic_send_crq(adapter, &crq);
1759 }
1760 }
1761 }
1762}
1763
Thomas Falcon62740e92019-05-09 23:13:43 -05001764static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001765{
1766 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001767 union ibmvnic_crq crq;
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001768 int rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001769
Thomas Falcon62740e92019-05-09 23:13:43 -05001770 if (!is_valid_ether_addr(dev_addr)) {
1771 rc = -EADDRNOTAVAIL;
1772 goto err;
1773 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001774
1775 memset(&crq, 0, sizeof(crq));
1776 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1777 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
Thomas Falcon62740e92019-05-09 23:13:43 -05001778 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
Thomas Falconf8136142018-01-29 13:45:05 -06001779
Thomas Falconff25dcb2019-11-25 17:12:56 -06001780 mutex_lock(&adapter->fw_lock);
1781 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06001782 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06001783
Thomas Falcon9c4eaab2018-05-23 13:37:57 -05001784 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falcon62740e92019-05-09 23:13:43 -05001785 if (rc) {
1786 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001787 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001788 goto err;
1789 }
1790
Thomas Falcon476d96c2019-11-25 17:12:55 -06001791 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001792 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falcon476d96c2019-11-25 17:12:55 -06001793 if (rc || adapter->fw_done_rc) {
Thomas Falcon62740e92019-05-09 23:13:43 -05001794 rc = -EIO;
Thomas Falconff25dcb2019-11-25 17:12:56 -06001795 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001796 goto err;
1797 }
Thomas Falconff25dcb2019-11-25 17:12:56 -06001798 mutex_unlock(&adapter->fw_lock);
Thomas Falcon62740e92019-05-09 23:13:43 -05001799 return 0;
1800err:
1801 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1802 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001803}
1804
John Allenc26eba02017-10-26 16:23:25 -05001805static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1806{
1807 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1808 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06001809 int rc;
John Allenc26eba02017-10-26 16:23:25 -05001810
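	/* While the adapter is still probing, only record the new address;
	 * it is applied once device initialization has completed.
	 */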
Thomas Falcon62740e92019-05-09 23:13:43 -05001811 rc = 0;
1812 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1813 if (adapter->state != VNIC_PROBED)
1814 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
John Allenc26eba02017-10-26 16:23:25 -05001815
Thomas Falconf8136142018-01-29 13:45:05 -06001816 return rc;
John Allenc26eba02017-10-26 16:23:25 -05001817}
1818
Nathan Fontenoted651a12017-05-03 14:04:38 -04001819/*
Juliet Kimb27507b2019-09-20 16:11:22 -04001820 * do_change_param_reset returns zero if we are able to keep processing reset
1821 * events, or non-zero if we hit a fatal error and must halt.
1822 */
1823static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1824 struct ibmvnic_rwi *rwi,
1825 u32 reset_state)
1826{
1827 struct net_device *netdev = adapter->netdev;
1828 int i, rc;
1829
1830 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1831 rwi->reset_reason);
1832
1833 netif_carrier_off(netdev);
1834 adapter->reset_reason = rwi->reset_reason;
1835
1836 ibmvnic_cleanup(netdev);
1837
1838 if (reset_state == VNIC_OPEN) {
1839 rc = __ibmvnic_close(netdev);
1840 if (rc)
1841 return rc;
1842 }
1843
1844 release_resources(adapter);
1845 release_sub_crqs(adapter, 1);
1846 release_crq_queue(adapter);
1847
1848 adapter->state = VNIC_PROBED;
1849
1850 rc = init_crq_queue(adapter);
1851
1852 if (rc) {
1853 netdev_err(adapter->netdev,
1854 "Couldn't initialize crq. rc=%d\n", rc);
1855 return rc;
1856 }
1857
1858 rc = ibmvnic_reset_init(adapter);
1859 if (rc)
1860 return IBMVNIC_INIT_FAILED;
1861
1862 /* If the adapter was in PROBE state prior to the reset,
1863 * exit here.
1864 */
1865 if (reset_state == VNIC_PROBED)
1866 return 0;
1867
1868 rc = ibmvnic_login(netdev);
1869 if (rc) {
1870 adapter->state = reset_state;
1871 return rc;
1872 }
1873
1874 rc = init_resources(adapter);
1875 if (rc)
1876 return rc;
1877
1878 ibmvnic_disable_irqs(adapter);
1879
1880 adapter->state = VNIC_CLOSED;
1881
1882 if (reset_state == VNIC_CLOSED)
1883 return 0;
1884
1885 rc = __ibmvnic_open(netdev);
1886 if (rc)
1887 return IBMVNIC_OPEN_FAILED;
1888
1889 /* refresh device's multicast list */
1890 ibmvnic_set_multi(netdev);
1891
1892 /* kick napi */
1893 for (i = 0; i < adapter->req_rx_queues; i++)
1894 napi_schedule(&adapter->napi[i]);
1895
1896 return 0;
1897}
1898
1899/*
Nathan Fontenoted651a12017-05-03 14:04:38 -04001900 * do_reset returns zero if we are able to keep processing reset events, or
1901 * non-zero if we hit a fatal error and must halt.
1902 */
1903static int do_reset(struct ibmvnic_adapter *adapter,
1904 struct ibmvnic_rwi *rwi, u32 reset_state)
1905{
John Allen896d8692018-01-18 16:26:31 -06001906 u64 old_num_rx_queues, old_num_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06001907 u64 old_num_rx_slots, old_num_tx_slots;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001908 struct net_device *netdev = adapter->netdev;
1909 int i, rc;
1910
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001911 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1912 rwi->reset_reason);
1913
Juliet Kimb27507b2019-09-20 16:11:22 -04001914 rtnl_lock();
1915
Nathan Fontenoted651a12017-05-03 14:04:38 -04001916 netif_carrier_off(netdev);
1917 adapter->reset_reason = rwi->reset_reason;
1918
John Allen896d8692018-01-18 16:26:31 -06001919 old_num_rx_queues = adapter->req_rx_queues;
1920 old_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon5bf032e2018-11-21 11:17:59 -06001921 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1922 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
John Allen896d8692018-01-18 16:26:31 -06001923
Nathan Fontenot30f79622018-04-06 18:37:06 -05001924 ibmvnic_cleanup(netdev);
1925
Thomas Falcon1f946082019-06-07 16:03:53 -05001926 if (reset_state == VNIC_OPEN &&
1927 adapter->reset_reason != VNIC_RESET_MOBILITY &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05001928 adapter->reset_reason != VNIC_RESET_FAILOVER) {
Juliet Kimb27507b2019-09-20 16:11:22 -04001929 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001930
Juliet Kimb27507b2019-09-20 16:11:22 -04001931 /* Release the RTNL lock before link state change and
1932 * re-acquire after the link state change to allow
1933 * linkwatch_event to grab the RTNL lock and run during
1934 * a reset.
1935 */
1936 rtnl_unlock();
1937 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1938 rtnl_lock();
1939 if (rc)
1940 goto out;
1941
1942 if (adapter->state != VNIC_CLOSING) {
1943 rc = -1;
1944 goto out;
1945 }
1946
1947 adapter->state = VNIC_CLOSED;
John Allenc26eba02017-10-26 16:23:25 -05001948 }
1949
John Allen8cb31cf2017-05-26 10:30:37 -04001950 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1951 /* remove the closed state so when we call open it appears
1952 * we are coming from the probed state.
1953 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001954 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04001955
Juliet Kimb27507b2019-09-20 16:11:22 -04001956 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
Nathan Fontenot30f79622018-04-06 18:37:06 -05001957 rc = ibmvnic_reenable_crq_queue(adapter);
1958 release_sub_crqs(adapter, 1);
1959 } else {
1960 rc = ibmvnic_reset_crq(adapter);
1961 if (!rc)
1962 rc = vio_enable_interrupts(adapter->vdev);
1963 }
1964
1965 if (rc) {
1966 netdev_err(adapter->netdev,
1967 "Couldn't initialize crq. rc=%d\n", rc);
Juliet Kimb27507b2019-09-20 16:11:22 -04001968 goto out;
Nathan Fontenot30f79622018-04-06 18:37:06 -05001969 }
1970
Thomas Falcon8a348452018-05-23 13:38:00 -05001971 rc = ibmvnic_reset_init(adapter);
Juliet Kimb27507b2019-09-20 16:11:22 -04001972 if (rc) {
1973 rc = IBMVNIC_INIT_FAILED;
1974 goto out;
1975 }
John Allen8cb31cf2017-05-26 10:30:37 -04001976
1977 /* If the adapter was in PROBE state prior to the reset,
1978 * exit here.
1979 */
Juliet Kimb27507b2019-09-20 16:11:22 -04001980 if (reset_state == VNIC_PROBED) {
1981 rc = 0;
1982 goto out;
1983 }
John Allen8cb31cf2017-05-26 10:30:37 -04001984
1985 rc = ibmvnic_login(netdev);
1986 if (rc) {
John Allen3578a7e2018-07-16 10:29:30 -05001987 adapter->state = reset_state;
Juliet Kimb27507b2019-09-20 16:11:22 -04001988 goto out;
John Allen8cb31cf2017-05-26 10:30:37 -04001989 }
1990
Juliet Kimb27507b2019-09-20 16:11:22 -04001991 if (adapter->req_rx_queues != old_num_rx_queues ||
1992 adapter->req_tx_queues != old_num_tx_queues ||
1993 adapter->req_rx_add_entries_per_subcrq !=
1994 old_num_rx_slots ||
1995 adapter->req_tx_entries_per_subcrq !=
1996 old_num_tx_slots) {
John Allen896d8692018-01-18 16:26:31 -06001997 release_rx_pools(adapter);
1998 release_tx_pools(adapter);
Juliet Kima5681e22018-11-19 15:59:22 -06001999 release_napi(adapter);
2000 release_vpd_data(adapter);
2001
2002 rc = init_resources(adapter);
Thomas Falconf611a5b2018-08-30 13:19:53 -05002003 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002004 goto out;
Nathan Fontenotd9043c12018-02-19 13:30:14 -06002005
John Allenc26eba02017-10-26 16:23:25 -05002006 } else {
2007 rc = reset_tx_pools(adapter);
2008 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002009 goto out;
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04002010
John Allenc26eba02017-10-26 16:23:25 -05002011 rc = reset_rx_pools(adapter);
2012 if (rc)
Juliet Kimb27507b2019-09-20 16:11:22 -04002013 goto out;
John Allenc26eba02017-10-26 16:23:25 -05002014 }
Thomas Falcon134bbe72018-05-16 15:49:04 -05002015 ibmvnic_disable_irqs(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002016 }
John Allene676d812018-03-14 10:41:29 -05002017 adapter->state = VNIC_CLOSED;
2018
Juliet Kimb27507b2019-09-20 16:11:22 -04002019 if (reset_state == VNIC_CLOSED) {
2020 rc = 0;
2021 goto out;
2022 }
John Allene676d812018-03-14 10:41:29 -05002023
Nathan Fontenoted651a12017-05-03 14:04:38 -04002024 rc = __ibmvnic_open(netdev);
2025 if (rc) {
Juliet Kimb27507b2019-09-20 16:11:22 -04002026 rc = IBMVNIC_OPEN_FAILED;
2027 goto out;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002028 }
2029
Thomas Falconbe32a242019-06-07 16:03:54 -05002030 /* refresh device's multicast list */
2031 ibmvnic_set_multi(netdev);
2032
Nathan Fontenoted651a12017-05-03 14:04:38 -04002033 /* kick napi */
2034 for (i = 0; i < adapter->req_rx_queues; i++)
2035 napi_schedule(&adapter->napi[i]);
2036
Juliet Kimb27507b2019-09-20 16:11:22 -04002037 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
Thomas Falcon986103e2018-11-30 10:59:08 -06002038 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04002039
Juliet Kimb27507b2019-09-20 16:11:22 -04002040 rc = 0;
2041
2042out:
2043 rtnl_unlock();
2044
2045 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002046}
2047
Thomas Falcon2770a792018-05-23 13:38:02 -05002048static int do_hard_reset(struct ibmvnic_adapter *adapter,
2049 struct ibmvnic_rwi *rwi, u32 reset_state)
2050{
2051 struct net_device *netdev = adapter->netdev;
2052 int rc;
2053
2054 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
2055 rwi->reset_reason);
2056
2057 netif_carrier_off(netdev);
2058 adapter->reset_reason = rwi->reset_reason;
2059
2060 ibmvnic_cleanup(netdev);
2061 release_resources(adapter);
2062 release_sub_crqs(adapter, 0);
2063 release_crq_queue(adapter);
2064
2065 /* remove the closed state so when we call open it appears
2066 * we are coming from the probed state.
2067 */
2068 adapter->state = VNIC_PROBED;
2069
Thomas Falconbbd669a2019-04-04 18:58:26 -05002070 reinit_completion(&adapter->init_done);
Thomas Falcon2770a792018-05-23 13:38:02 -05002071 rc = init_crq_queue(adapter);
2072 if (rc) {
2073 netdev_err(adapter->netdev,
2074 "Couldn't initialize crq. rc=%d\n", rc);
2075 return rc;
2076 }
2077
2078 rc = ibmvnic_init(adapter);
2079 if (rc)
2080 return rc;
2081
2082 /* If the adapter was in PROBE state prior to the reset,
2083 * exit here.
2084 */
2085 if (reset_state == VNIC_PROBED)
2086 return 0;
2087
2088 rc = ibmvnic_login(netdev);
2089 if (rc) {
2090 adapter->state = VNIC_PROBED;
2091 return 0;
2092 }
Juliet Kima5681e22018-11-19 15:59:22 -06002093
2094 rc = init_resources(adapter);
Thomas Falcon2770a792018-05-23 13:38:02 -05002095 if (rc)
2096 return rc;
2097
2098 ibmvnic_disable_irqs(adapter);
2099 adapter->state = VNIC_CLOSED;
2100
2101 if (reset_state == VNIC_CLOSED)
2102 return 0;
2103
2104 rc = __ibmvnic_open(netdev);
Juliet Kimb27507b2019-09-20 16:11:22 -04002105 if (rc)
2106 return IBMVNIC_OPEN_FAILED;
Thomas Falcon2770a792018-05-23 13:38:02 -05002107
Thomas Falcon2770a792018-05-23 13:38:02 -05002108 return 0;
2109}
2110
Nathan Fontenoted651a12017-05-03 14:04:38 -04002111static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2112{
2113 struct ibmvnic_rwi *rwi;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002114 unsigned long flags;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002115
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002116 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002117
2118 if (!list_empty(&adapter->rwi_list)) {
2119 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2120 list);
2121 list_del(&rwi->list);
2122 } else {
2123 rwi = NULL;
2124 }
2125
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002126 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002127 return rwi;
2128}
2129
2130static void free_all_rwi(struct ibmvnic_adapter *adapter)
2131{
2132 struct ibmvnic_rwi *rwi;
2133
2134 rwi = get_next_rwi(adapter);
2135 while (rwi) {
2136 kfree(rwi);
2137 rwi = get_next_rwi(adapter);
2138 }
2139}
2140
2141static void __ibmvnic_reset(struct work_struct *work)
2142{
2143 struct ibmvnic_rwi *rwi;
2144 struct ibmvnic_adapter *adapter;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002145 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05002146 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002147
2148 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002149
Juliet Kim7ed5b312019-09-20 16:11:23 -04002150 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2151 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2152 IBMVNIC_RESET_DELAY);
2153 return;
2154 }
2155
Nathan Fontenoted651a12017-05-03 14:04:38 -04002156 reset_state = adapter->state;
2157
2158 rwi = get_next_rwi(adapter);
2159 while (rwi) {
Thomas Falcon36f10312019-08-27 11:10:04 -05002160 if (adapter->state == VNIC_REMOVING ||
Michal Suchanekc8dc5592019-09-09 22:44:51 +02002161 adapter->state == VNIC_REMOVED) {
Juliet Kim1c2977c2019-09-05 17:30:01 -04002162 kfree(rwi);
2163 rc = EBUSY;
2164 break;
2165 }
Thomas Falcon36f10312019-08-27 11:10:04 -05002166
Juliet Kimb27507b2019-09-20 16:11:22 -04002167 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2168 /* CHANGE_PARAM requestor holds rtnl_lock */
2169 rc = do_change_param_reset(adapter, rwi, reset_state);
2170 } else if (adapter->force_reset_recovery) {
2171 /* Transport event occurred during previous reset */
2172 if (adapter->wait_for_reset) {
2173 /* Previous was CHANGE_PARAM; caller locked */
2174 adapter->force_reset_recovery = false;
2175 rc = do_hard_reset(adapter, rwi, reset_state);
2176 } else {
2177 rtnl_lock();
2178 adapter->force_reset_recovery = false;
2179 rc = do_hard_reset(adapter, rwi, reset_state);
2180 rtnl_unlock();
2181 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002182 } else {
2183 rc = do_reset(adapter, rwi, reset_state);
2184 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002185 kfree(rwi);
Juliet Kimb27507b2019-09-20 16:11:22 -04002186 if (rc == IBMVNIC_OPEN_FAILED) {
2187 if (list_empty(&adapter->rwi_list))
2188 adapter->state = VNIC_CLOSED;
2189 else
2190 adapter->state = reset_state;
2191 rc = 0;
2192 } else if (rc && rc != IBMVNIC_INIT_FAILED &&
Thomas Falcon2770a792018-05-23 13:38:02 -05002193 !adapter->force_reset_recovery)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002194 break;
2195
2196 rwi = get_next_rwi(adapter);
Juliet Kim7ed5b312019-09-20 16:11:23 -04002197
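		/* A queued failover or mobility reset implies a transport
		 * event occurred, so the next reset must be a hard reset.
		 */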
2198 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2199 rwi->reset_reason == VNIC_RESET_MOBILITY))
2200 adapter->force_reset_recovery = true;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002201 }
2202
John Allenc26eba02017-10-26 16:23:25 -05002203 if (adapter->wait_for_reset) {
John Allenc26eba02017-10-26 16:23:25 -05002204 adapter->reset_done_rc = rc;
2205 complete(&adapter->reset_done);
2206 }
2207
Nathan Fontenoted651a12017-05-03 14:04:38 -04002208 if (rc) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002209 netdev_dbg(adapter->netdev, "Reset failed\n");
Nathan Fontenoted651a12017-05-03 14:04:38 -04002210 free_all_rwi(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002211 }
Juliet Kim1c2977c2019-09-05 17:30:01 -04002212
Juliet Kim7ed5b312019-09-20 16:11:23 -04002213 clear_bit_unlock(0, &adapter->resetting);
2214}
2215
2216static void __ibmvnic_delayed_reset(struct work_struct *work)
2217{
2218 struct ibmvnic_adapter *adapter;
2219
2220 adapter = container_of(work, struct ibmvnic_adapter,
2221 ibmvnic_delayed_reset.work);
2222 __ibmvnic_reset(&adapter->ibmvnic_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002223}
2224
Thomas Falconaf894d22018-04-06 18:37:04 -05002225static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2226 enum ibmvnic_reset_reason reason)
Nathan Fontenoted651a12017-05-03 14:04:38 -04002227{
Thomas Falcon2770a792018-05-23 13:38:02 -05002228 struct list_head *entry, *tmp_entry;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002229 struct ibmvnic_rwi *rwi, *tmp;
2230 struct net_device *netdev = adapter->netdev;
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002231 unsigned long flags;
Thomas Falconaf894d22018-04-06 18:37:04 -05002232 int ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002233
2234 if (adapter->state == VNIC_REMOVING ||
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002235 adapter->state == VNIC_REMOVED ||
2236 adapter->failover_pending) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002237 ret = EBUSY;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05002238 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002239 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002240 }
2241
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002242 if (adapter->state == VNIC_PROBING) {
2243 netdev_warn(netdev, "Adapter reset during probe\n");
Thomas Falconaf894d22018-04-06 18:37:04 -05002244 ret = adapter->init_done_rc = EAGAIN;
2245 goto err;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04002246 }
2247
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002248 spin_lock_irqsave(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002249
2250 list_for_each(entry, &adapter->rwi_list) {
2251 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2252 if (tmp->reset_reason == reason) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002253 netdev_dbg(netdev, "Skipping matching reset\n");
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002254 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Thomas Falconaf894d22018-04-06 18:37:04 -05002255 ret = EBUSY;
2256 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002257 }
2258 }
2259
Thomas Falcon1d1bbc32018-12-10 15:22:23 -06002260 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002261 if (!rwi) {
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002262 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002263 ibmvnic_close(netdev);
Thomas Falconaf894d22018-04-06 18:37:04 -05002264 ret = ENOMEM;
2265 goto err;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002266 }
Thomas Falcon2770a792018-05-23 13:38:02 -05002267 /* if we just received a transport event,
2268 * flush reset queue and process this reset
2269 */
2270 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2271 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2272 list_del(entry);
2273 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04002274 rwi->reset_reason = reason;
2275 list_add_tail(&rwi->list, &adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06002276 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002277 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
Nathan Fontenoted651a12017-05-03 14:04:38 -04002278 schedule_work(&adapter->ibmvnic_reset);
Thomas Falconaf894d22018-04-06 18:37:04 -05002279
2280 return 0;
2281err:
Thomas Falconaf894d22018-04-06 18:37:04 -05002282 return -ret;
Nathan Fontenoted651a12017-05-03 14:04:38 -04002283}
2284
Thomas Falcon032c5e82015-12-21 11:26:06 -06002285static void ibmvnic_tx_timeout(struct net_device *dev)
2286{
2287 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002288
Nathan Fontenoted651a12017-05-03 14:04:38 -04002289 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002290}
2291
2292static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2293 struct ibmvnic_rx_buff *rx_buff)
2294{
2295 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2296
2297 rx_buff->skb = NULL;
2298
2299 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2300 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2301
2302 atomic_dec(&pool->available);
2303}
2304
2305static int ibmvnic_poll(struct napi_struct *napi, int budget)
2306{
2307 struct net_device *netdev = napi->dev;
2308 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2309 int scrq_num = (int)(napi - adapter->napi);
2310 int frames_processed = 0;
Nathan Fontenot152ce472017-05-26 10:30:54 -04002311
Thomas Falcon032c5e82015-12-21 11:26:06 -06002312restart_poll:
2313 while (frames_processed < budget) {
2314 struct sk_buff *skb;
2315 struct ibmvnic_rx_buff *rx_buff;
2316 union sub_crq *next;
2317 u32 length;
2318 u16 offset;
2319 u8 flags = 0;
2320
Juliet Kim7ed5b312019-09-20 16:11:23 -04002321 if (unlikely(test_bit(0, &adapter->resetting) &&
John Allen34686562018-02-06 16:21:49 -06002322 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Thomas Falcon21ecba62017-06-14 23:50:09 -05002323 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2324 napi_complete_done(napi, frames_processed);
2325 return frames_processed;
2326 }
2327
Thomas Falcon032c5e82015-12-21 11:26:06 -06002328 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2329 break;
2330 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
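		/* the correlator echoes back the rx_buff pointer that was
		 * supplied when the buffer was posted to the pool
		 */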
2331 rx_buff =
2332 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2333 rx_comp.correlator);
2334 /* do error checking */
2335 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05002336 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2337 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002338 /* free the entry */
2339 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06002340 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002341 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04002342 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06002343 } else if (!rx_buff->skb) {
2344 /* free the entry */
2345 next->rx_comp.first = 0;
2346 remove_buff_from_pool(adapter, rx_buff);
2347 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002348 }
2349
2350 length = be32_to_cpu(next->rx_comp.len);
2351 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2352 flags = next->rx_comp.flags;
2353 skb = rx_buff->skb;
2354 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2355 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04002356
2357 /* VLAN Header has been stripped by the system firmware and
2358 * needs to be inserted by the driver
2359 */
2360 if (adapter->rx_vlan_header_insertion &&
2361 (flags & IBMVNIC_VLAN_STRIPPED))
2362 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2363 ntohs(next->rx_comp.vlan_tci));
2364
Thomas Falcon032c5e82015-12-21 11:26:06 -06002365 /* free the entry */
2366 next->rx_comp.first = 0;
2367 remove_buff_from_pool(adapter, rx_buff);
2368
2369 skb_put(skb, length);
2370 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04002371 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002372
2373 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2374 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2375 skb->ip_summed = CHECKSUM_UNNECESSARY;
2376 }
2377
2378 length = skb->len;
2379 napi_gro_receive(napi, skb); /* send it up */
2380 netdev->stats.rx_packets++;
2381 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05002382 adapter->rx_stats_buffers[scrq_num].packets++;
2383 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002384 frames_processed++;
2385 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04002386
2387 if (adapter->state != VNIC_CLOSING)
2388 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002389
2390 if (frames_processed < budget) {
2391 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08002392 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002393 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2394 napi_reschedule(napi)) {
2395 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2396 goto restart_poll;
2397 }
2398 }
2399 return frames_processed;
2400}
2401
John Allenc26eba02017-10-26 16:23:25 -05002402static int wait_for_reset(struct ibmvnic_adapter *adapter)
2403{
Thomas Falconaf894d22018-04-06 18:37:04 -05002404 int rc, ret;
2405
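	/* Save the current settings so they can be restored if the
	 * CHANGE_PARAM reset fails to apply the requested values.
	 */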
John Allenc26eba02017-10-26 16:23:25 -05002406 adapter->fallback.mtu = adapter->req_mtu;
2407 adapter->fallback.rx_queues = adapter->req_rx_queues;
2408 adapter->fallback.tx_queues = adapter->req_tx_queues;
2409 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2410 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2411
Thomas Falcon070eca92019-11-25 17:12:53 -06002412 reinit_completion(&adapter->reset_done);
John Allenc26eba02017-10-26 16:23:25 -05002413 adapter->wait_for_reset = true;
Thomas Falconaf894d22018-04-06 18:37:04 -05002414 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002415
2416 if (rc) {
2417 ret = rc;
2418 goto out;
2419 }
2420 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2421 if (rc) {
2422 ret = -ENODEV;
2423 goto out;
2424 }
John Allenc26eba02017-10-26 16:23:25 -05002425
Thomas Falconaf894d22018-04-06 18:37:04 -05002426 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002427 if (adapter->reset_done_rc) {
Thomas Falconaf894d22018-04-06 18:37:04 -05002428 ret = -EIO;
John Allenc26eba02017-10-26 16:23:25 -05002429 adapter->desired.mtu = adapter->fallback.mtu;
2430 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2431 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2432 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2433 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2434
Thomas Falcon070eca92019-11-25 17:12:53 -06002435 reinit_completion(&adapter->reset_done);
Thomas Falconaf894d22018-04-06 18:37:04 -05002436 adapter->wait_for_reset = true;
2437 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
Thomas Falcon476d96c2019-11-25 17:12:55 -06002438 if (rc) {
2439 ret = rc;
2440 goto out;
2441 }
2442 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2443 60000);
2444 if (rc) {
2445 ret = -ENODEV;
2446 goto out;
2447 }
John Allenc26eba02017-10-26 16:23:25 -05002448 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06002449out:
John Allenc26eba02017-10-26 16:23:25 -05002450 adapter->wait_for_reset = false;
2451
Thomas Falconaf894d22018-04-06 18:37:04 -05002452 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002453}
2454
John Allen3a807b72017-06-06 16:55:52 -05002455static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2456{
John Allenc26eba02017-10-26 16:23:25 -05002457 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2458
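	/* the MTU exchanged with the VNIC server includes the Ethernet
	 * header
	 */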
2459 adapter->desired.mtu = new_mtu + ETH_HLEN;
2460
2461 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05002462}
2463
Thomas Falconf10b09e2018-03-12 11:51:05 -05002464static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2465 struct net_device *dev,
2466 netdev_features_t features)
2467{
2468 /* Some backing hardware adapters can not
2469 * handle packets with a MSS less than 224
2470 * or with only one segment.
2471 */
2472 if (skb_is_gso(skb)) {
2473 if (skb_shinfo(skb)->gso_size < 224 ||
2474 skb_shinfo(skb)->gso_segs == 1)
2475 features &= ~NETIF_F_GSO_MASK;
2476 }
2477
2478 return features;
2479}
2480
Thomas Falcon032c5e82015-12-21 11:26:06 -06002481static const struct net_device_ops ibmvnic_netdev_ops = {
2482 .ndo_open = ibmvnic_open,
2483 .ndo_stop = ibmvnic_close,
2484 .ndo_start_xmit = ibmvnic_xmit,
2485 .ndo_set_rx_mode = ibmvnic_set_multi,
2486 .ndo_set_mac_address = ibmvnic_set_mac,
2487 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002488 .ndo_tx_timeout = ibmvnic_tx_timeout,
John Allen3a807b72017-06-06 16:55:52 -05002489 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05002490 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002491};
2492
2493/* ethtool functions */
2494
Philippe Reynes8a433792017-01-07 22:37:29 +01002495static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2496 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002497{
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002498 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2499 int rc;
Philippe Reynes8a433792017-01-07 22:37:29 +01002500
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03002501 rc = send_query_phys_parms(adapter);
2502 if (rc) {
2503 adapter->speed = SPEED_UNKNOWN;
2504 adapter->duplex = DUPLEX_UNKNOWN;
2505 }
2506 cmd->base.speed = adapter->speed;
2507 cmd->base.duplex = adapter->duplex;
Philippe Reynes8a433792017-01-07 22:37:29 +01002508 cmd->base.port = PORT_FIBRE;
2509 cmd->base.phy_address = 0;
2510 cmd->base.autoneg = AUTONEG_ENABLE;
2511
Thomas Falcon032c5e82015-12-21 11:26:06 -06002512 return 0;
2513}
2514
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002515static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002516 struct ethtool_drvinfo *info)
2517{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002518 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2519
Thomas Falcon032c5e82015-12-21 11:26:06 -06002520 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2521 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002522 strlcpy(info->fw_version, adapter->fw_version,
2523 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002524}
2525
2526static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2527{
2528 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2529
2530 return adapter->msg_enable;
2531}
2532
2533static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2534{
2535 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2536
2537 adapter->msg_enable = data;
2538}
2539
2540static u32 ibmvnic_get_link(struct net_device *netdev)
2541{
2542 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2543
2544 /* Don't need to send a query because we request a logical link up at
2545 * init and then we wait for link state indications
2546 */
2547 return adapter->logical_link_state;
2548}
2549
2550static void ibmvnic_get_ringparam(struct net_device *netdev,
2551 struct ethtool_ringparam *ring)
2552{
John Allenbc131b32017-08-02 16:46:30 -05002553 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2554
Thomas Falcon723ad912018-09-28 18:38:26 -05002555 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2556 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2557 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2558 } else {
2559 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2560 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2561 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002562 ring->rx_mini_max_pending = 0;
2563 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05002564 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2565 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002566 ring->rx_mini_pending = 0;
2567 ring->rx_jumbo_pending = 0;
2568}
2569
John Allenc26eba02017-10-26 16:23:25 -05002570static int ibmvnic_set_ringparam(struct net_device *netdev,
2571 struct ethtool_ringparam *ring)
2572{
2573 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002574 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002575
Thomas Falcon723ad912018-09-28 18:38:26 -05002576 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002577 adapter->desired.rx_entries = ring->rx_pending;
2578 adapter->desired.tx_entries = ring->tx_pending;
2579
Thomas Falcon723ad912018-09-28 18:38:26 -05002580 ret = wait_for_reset(adapter);
2581
2582 if (!ret &&
2583 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2584 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2585 netdev_info(netdev,
2586 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2587 ring->rx_pending, ring->tx_pending,
2588 adapter->req_rx_add_entries_per_subcrq,
2589 adapter->req_tx_entries_per_subcrq);
2590 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002591}
2592
John Allenc2dbeb62017-08-02 16:47:17 -05002593static void ibmvnic_get_channels(struct net_device *netdev,
2594 struct ethtool_channels *channels)
2595{
2596 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2597
Thomas Falcon723ad912018-09-28 18:38:26 -05002598 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2599 channels->max_rx = adapter->max_rx_queues;
2600 channels->max_tx = adapter->max_tx_queues;
2601 } else {
2602 channels->max_rx = IBMVNIC_MAX_QUEUES;
2603 channels->max_tx = IBMVNIC_MAX_QUEUES;
2604 }
2605
John Allenc2dbeb62017-08-02 16:47:17 -05002606 channels->max_other = 0;
2607 channels->max_combined = 0;
2608 channels->rx_count = adapter->req_rx_queues;
2609 channels->tx_count = adapter->req_tx_queues;
2610 channels->other_count = 0;
2611 channels->combined_count = 0;
2612}
2613
John Allenc26eba02017-10-26 16:23:25 -05002614static int ibmvnic_set_channels(struct net_device *netdev,
2615 struct ethtool_channels *channels)
2616{
2617 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon723ad912018-09-28 18:38:26 -05002618 int ret;
John Allenc26eba02017-10-26 16:23:25 -05002619
Thomas Falcon723ad912018-09-28 18:38:26 -05002620 ret = 0;
John Allenc26eba02017-10-26 16:23:25 -05002621 adapter->desired.rx_queues = channels->rx_count;
2622 adapter->desired.tx_queues = channels->tx_count;
2623
Thomas Falcon723ad912018-09-28 18:38:26 -05002624 ret = wait_for_reset(adapter);
2625
2626 if (!ret &&
2627 (adapter->req_rx_queues != channels->rx_count ||
2628 adapter->req_tx_queues != channels->tx_count))
2629 netdev_info(netdev,
2630 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2631 channels->rx_count, channels->tx_count,
2632 adapter->req_rx_queues, adapter->req_tx_queues);
2633 return ret;
John Allenc26eba02017-10-26 16:23:25 -05002635}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
				i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	default:
		return;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}
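
/*
 * Note: the per-queue values written above must stay in the same order
 * as the strings emitted by ibmvnic_get_strings(); ethtool pairs names
 * with values purely by index (visible via "ethtool -S <ifname>").
 */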

static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.set_ringparam = ibmvnic_set_ringparam,
	.get_channels = ibmvnic_get_channels,
	.set_channels = ibmvnic_set_channels,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings = ibmvnic_get_link_ksettings,
	.get_priv_flags = ibmvnic_get_priv_flags,
	.set_priv_flags = ibmvnic_set_priv_flags,
};

/* Routines for managing CRQs/sCRQs  */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
	atomic_set(&scrq->used, 0);
	scrq->cur = 0;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	/* Return 0 explicitly; "return rc" would read an uninitialized
	 * value if both queue counts were zero.
	 */
	return 0;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (test_bit(0, &adapter->resetting) &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = (0xff000000) | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI would fail with rc = H_FUNCTION when running
		 * in XIVE mode which is expected, but not an error.
		 */
		if (rc && (rc != H_FUNCTION))
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			num_entries += txbuff->num_entries;

			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, scrq->name, scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		/* Dispose the tx mapping here, not the rx one: the
		 * original code disposed rx_scrq[j]->irq a second time,
		 * leaking the tx mapping.
		 */
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, 1);
	return rc;
}

static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], 1);
	kfree(allqueues);
	return -1;
}

static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
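
/*
 * Each REQUEST_CAPABILITY sent above increments running_cap_crqs; the
 * CRQ response handler elsewhere in this file decrements it, and only
 * when the counter drains to zero does initialization proceed (or the
 * request get retried with server-adjusted values).
 */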

static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	return !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
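
/*
 * Ring-consumption sketch (illustrative): an entry is valid while
 * generic.first has IBMVNIC_CRQ_CMD_RSP set, and the consumer cursor
 * scrq->cur wraps at scrq->size. A 4 * PAGE_SIZE queue therefore holds
 * 4 * PAGE_SIZE / sizeof(union sub_crq) entries, e.g. 512 with 4 KiB
 * pages and 32-byte descriptors.
 */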

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (test_bit(0, &adapter->resetting))
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}

static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}
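
/*
 * Resulting client-data layout (illustrative, assuming the nodename
 * "lpar1" and the netdev name "eth0"; the login buffer is zero-filled,
 * so the fourth, terminating entry is all zeroes):
 *
 *   [type=1][len=6]["Linux\0"]
 *   [type=2][len=6]["lpar1\0"]
 *   [type=3][len=5]["eth0\0"]
 *   [type=0][len=0]
 */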

static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
		cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}

static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
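
/*
 * The query sequence above is intentionally explicit. A hypothetical
 * helper could fold the repeated pattern, e.g.:
 *
 *	static void ibmvnic_query_one_cap(struct ibmvnic_adapter *adapter,
 *					  union ibmvnic_crq *crq, u16 cap)
 *	{
 *		crq->query_capability.capability = cpu_to_be16(cap);
 *		atomic_inc(&adapter->running_cap_crqs);
 *		ibmvnic_send_crq(adapter, crq);
 *	}
 *
 * This is a sketch only; no such helper exists in this driver.
 */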

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
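
/*
 * VPD layout assumed by the parser above (illustrative):
 *
 *   ... 'R' 'M' <len> <len ASCII bytes of firmware level> ...
 *
 * i.e. the byte following the "RM" keyword gives the length of the
 * firmware level string that starts immediately after it.
 */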

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
			adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
4098
Thomas Falconc9008d32018-08-06 21:39:59 -05004099static const char *ibmvnic_fw_err_cause(u16 cause)
4100{
4101 switch (cause) {
4102 case ADAPTER_PROBLEM:
4103 return "adapter problem";
4104 case BUS_PROBLEM:
4105 return "bus problem";
4106 case FW_PROBLEM:
4107 return "firmware problem";
4108 case DD_PROBLEM:
4109 return "device driver problem";
4110 case EEH_RECOVERY:
4111 return "EEH recovery";
4112 case FW_UPDATED:
4113 return "firmware updated";
4114 case LOW_MEMORY:
4115		return "low memory";
4116 default:
4117 return "unknown";
4118 }
4119}
4120
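/* An ERROR_INDICATION CRQ reports a firmware error; log its cause and
 * schedule a fatal or non-fatal reset depending on the error flags.
 */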
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004121static void handle_error_indication(union ibmvnic_crq *crq,
4122 struct ibmvnic_adapter *adapter)
4123{
4124 struct device *dev = &adapter->vdev->dev;
Thomas Falconc9008d32018-08-06 21:39:59 -05004125 u16 cause;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004126
Thomas Falconc9008d32018-08-06 21:39:59 -05004127 cause = be16_to_cpu(crq->error_indication.error_cause);
4128
4129 dev_warn_ratelimited(dev,
4130 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4131 crq->error_indication.flags
4132 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4133 ibmvnic_fw_err_cause(cause));
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04004134
Nathan Fontenoted651a12017-05-03 14:04:38 -04004135 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4136 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04004137 else
4138 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004139}
4140
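/* Completion handler for CHANGE_MAC_ADDR: copy the address the server
 * actually accepted into the netdev and wake the waiter on fw_done.
 */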
Thomas Falconf8136142018-01-29 13:45:05 -06004141static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4142 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004143{
4144 struct net_device *netdev = adapter->netdev;
4145 struct device *dev = &adapter->vdev->dev;
4146 long rc;
4147
4148 rc = crq->change_mac_addr_rsp.rc.code;
4149 if (rc) {
4150 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06004151 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004152 }
Thomas Falcon62740e92019-05-09 23:13:43 -05004153 ether_addr_copy(netdev->dev_addr,
4154 &crq->change_mac_addr_rsp.mac_addr[0]);
Thomas Falconf8136142018-01-29 13:45:05 -06004155out:
4156 complete(&adapter->fw_done);
4157 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004158}
4159
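/* Process a REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * suggests a workable value, so the request is adjusted and resent.
 * Once all outstanding capability responses have arrived, query IP
 * offload support.
 */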
4160static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4161 struct ibmvnic_adapter *adapter)
4162{
4163 struct device *dev = &adapter->vdev->dev;
4164 u64 *req_value;
4165 char *name;
4166
Thomas Falcon901e0402017-02-15 12:17:59 -06004167 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004168 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4169 case REQ_TX_QUEUES:
4170 req_value = &adapter->req_tx_queues;
4171 name = "tx";
4172 break;
4173 case REQ_RX_QUEUES:
4174 req_value = &adapter->req_rx_queues;
4175 name = "rx";
4176 break;
4177 case REQ_RX_ADD_QUEUES:
4178 req_value = &adapter->req_rx_add_queues;
4179 name = "rx_add";
4180 break;
4181 case REQ_TX_ENTRIES_PER_SUBCRQ:
4182 req_value = &adapter->req_tx_entries_per_subcrq;
4183 name = "tx_entries_per_subcrq";
4184 break;
4185 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4186 req_value = &adapter->req_rx_add_entries_per_subcrq;
4187 name = "rx_add_entries_per_subcrq";
4188 break;
4189 case REQ_MTU:
4190 req_value = &adapter->req_mtu;
4191 name = "mtu";
4192 break;
4193 case PROMISC_REQUESTED:
4194 req_value = &adapter->promisc;
4195 name = "promisc";
4196 break;
4197 default:
4198		dev_err(dev, "Got invalid cap request rsp %d\n",
4199			be16_to_cpu(crq->request_capability_rsp.capability));
4200 return;
4201 }
4202
4203 switch (crq->request_capability_rsp.rc.code) {
4204 case SUCCESS:
4205 break;
4206 case PARTIALSUCCESS:
4207		dev_info(dev, "req=%llu, rsp=%ld in %s queue, retrying.\n",
4208			 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06004209			 (long)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06004210					   number), name);
John Allene7913802018-01-18 16:27:12 -06004211
4212 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4213 REQ_MTU) {
4214 pr_err("mtu of %llu is not supported. Reverting.\n",
4215 *req_value);
4216 *req_value = adapter->fallback.mtu;
4217 } else {
4218 *req_value =
4219 be64_to_cpu(crq->request_capability_rsp.number);
4220 }
4221
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04004222 ibmvnic_send_req_caps(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004223 return;
4224 default:
4225 dev_err(dev, "Error %d in request cap rsp\n",
4226 crq->request_capability_rsp.rc.code);
4227 return;
4228 }
4229
4230 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06004231 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06004232 union ibmvnic_crq newcrq;
4233 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4234 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4235 &adapter->ip_offload_buf;
4236
Thomas Falcon249168a2017-02-15 12:18:00 -06004237 adapter->wait_capability = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004238 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4239 buf_sz,
4240 DMA_FROM_DEVICE);
4241
4242 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4243 if (!firmware_has_feature(FW_FEATURE_CMO))
4244 dev_err(dev, "Couldn't map offload buffer\n");
4245 return;
4246 }
4247
4248 memset(&newcrq, 0, sizeof(newcrq));
4249 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4250 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4251 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4252 newcrq.query_ip_offload.ioba =
4253 cpu_to_be32(adapter->ip_offload_tok);
4254
4255 ibmvnic_send_crq(adapter, &newcrq);
4256 }
4257}
4258
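/* Process the LOGIN response: unmap the login buffers, apply the
 * negotiated MTU, and cross-check the response against the request
 * before declaring initialization done.
 */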
4259static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4260 struct ibmvnic_adapter *adapter)
4261{
4262 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05004263 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004264 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4265 struct ibmvnic_login_buffer *login = adapter->login_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004266 int i;
4267
4268 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004269 DMA_TO_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004270 dma_unmap_single(dev, adapter->login_rsp_buf_token,
Thomas Falcon37e40fa2018-04-06 18:37:02 -05004271 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004272
John Allen498cd8e2016-04-06 11:49:55 -05004273 /* If the number of queues requested can't be allocated by the
4274 * server, the login response will return with code 1. We will need
4275 * to resend the login buffer with fewer queues requested.
4276 */
4277 if (login_rsp_crq->generic.rc.code) {
Nathan Fontenot64d92aa2018-04-11 10:09:32 -05004278 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
John Allen498cd8e2016-04-06 11:49:55 -05004279 complete(&adapter->init_done);
4280 return 0;
4281 }
4282
John Allenc26eba02017-10-26 16:23:25 -05004283 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4284
Thomas Falcon032c5e82015-12-21 11:26:06 -06004285 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4286 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4287 netdev_dbg(adapter->netdev, "%016lx\n",
4288			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
4289 }
4290
4291 /* Sanity checks */
4292 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4293 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4294 adapter->req_rx_add_queues !=
4295 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4296 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4297 ibmvnic_remove(adapter->vdev);
4298 return -EIO;
4299 }
Thomas Falcona2c0f032018-02-21 18:18:30 -06004300 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004301 complete(&adapter->init_done);
4302
Thomas Falcon032c5e82015-12-21 11:26:06 -06004303 return 0;
4304}
4305
Thomas Falcon032c5e82015-12-21 11:26:06 -06004306static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4307 struct ibmvnic_adapter *adapter)
4308{
4309 struct device *dev = &adapter->vdev->dev;
4310 long rc;
4311
4312 rc = crq->request_unmap_rsp.rc.code;
4313 if (rc)
4314 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4315}
4316
4317static void handle_query_map_rsp(union ibmvnic_crq *crq,
4318 struct ibmvnic_adapter *adapter)
4319{
4320 struct net_device *netdev = adapter->netdev;
4321 struct device *dev = &adapter->vdev->dev;
4322 long rc;
4323
4324 rc = crq->query_map_rsp.rc.code;
4325 if (rc) {
4326 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4327 return;
4328 }
4329 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4330 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4331 crq->query_map_rsp.free_pages);
4332}
4333
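/* Record a single QUERY_CAPABILITY response in the adapter structure.
 * When the last outstanding query completes, send the capability
 * requests based on the min/max values just learned.
 */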
4334static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4335 struct ibmvnic_adapter *adapter)
4336{
4337 struct net_device *netdev = adapter->netdev;
4338 struct device *dev = &adapter->vdev->dev;
4339 long rc;
4340
Thomas Falcon901e0402017-02-15 12:17:59 -06004341 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004342 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06004343 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004344 rc = crq->query_capability.rc.code;
4345 if (rc) {
4346 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4347 goto out;
4348 }
4349
4350 switch (be16_to_cpu(crq->query_capability.capability)) {
4351 case MIN_TX_QUEUES:
4352 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004353 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004354 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4355 adapter->min_tx_queues);
4356 break;
4357 case MIN_RX_QUEUES:
4358 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004359 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004360 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4361 adapter->min_rx_queues);
4362 break;
4363 case MIN_RX_ADD_QUEUES:
4364 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004365 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004366 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4367 adapter->min_rx_add_queues);
4368 break;
4369 case MAX_TX_QUEUES:
4370 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004371 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004372 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4373 adapter->max_tx_queues);
4374 break;
4375 case MAX_RX_QUEUES:
4376 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004377 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004378 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4379 adapter->max_rx_queues);
4380 break;
4381 case MAX_RX_ADD_QUEUES:
4382 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004383 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004384 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4385 adapter->max_rx_add_queues);
4386 break;
4387 case MIN_TX_ENTRIES_PER_SUBCRQ:
4388 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004389 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004390 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4391 adapter->min_tx_entries_per_subcrq);
4392 break;
4393 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4394 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004395 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004396 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4397 adapter->min_rx_add_entries_per_subcrq);
4398 break;
4399 case MAX_TX_ENTRIES_PER_SUBCRQ:
4400 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004401 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004402 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4403 adapter->max_tx_entries_per_subcrq);
4404 break;
4405 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4406 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004407 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004408 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4409 adapter->max_rx_add_entries_per_subcrq);
4410 break;
4411 case TCP_IP_OFFLOAD:
4412 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06004413 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004414 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4415 adapter->tcp_ip_offload);
4416 break;
4417 case PROMISC_SUPPORTED:
4418 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004419 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004420 netdev_dbg(netdev, "promisc_supported = %lld\n",
4421 adapter->promisc_supported);
4422 break;
4423 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004424 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004425 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004426 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4427 break;
4428 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004429 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004430 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004431 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4432 break;
4433 case MAX_MULTICAST_FILTERS:
4434 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004435 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004436 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4437 adapter->max_multicast_filters);
4438 break;
4439 case VLAN_HEADER_INSERTION:
4440 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004441 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004442 if (adapter->vlan_header_insertion)
4443 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4444 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4445 adapter->vlan_header_insertion);
4446 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004447 case RX_VLAN_HEADER_INSERTION:
4448 adapter->rx_vlan_header_insertion =
4449 be64_to_cpu(crq->query_capability.number);
4450 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4451 adapter->rx_vlan_header_insertion);
4452 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004453 case MAX_TX_SG_ENTRIES:
4454 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004455 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004456 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4457 adapter->max_tx_sg_entries);
4458 break;
4459 case RX_SG_SUPPORTED:
4460 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004461 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004462 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4463 adapter->rx_sg_supported);
4464 break;
4465 case OPT_TX_COMP_SUB_QUEUES:
4466 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004467 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004468 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4469 adapter->opt_tx_comp_sub_queues);
4470 break;
4471 case OPT_RX_COMP_QUEUES:
4472 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004473 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004474 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4475 adapter->opt_rx_comp_queues);
4476 break;
4477 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4478 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004479 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004480 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4481 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4482 break;
4483 case OPT_TX_ENTRIES_PER_SUBCRQ:
4484 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004485 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004486 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4487 adapter->opt_tx_entries_per_subcrq);
4488 break;
4489 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4490 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004491 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004492 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4493 adapter->opt_rxba_entries_per_subcrq);
4494 break;
4495 case TX_RX_DESC_REQ:
4496 adapter->tx_rx_desc_req = crq->query_capability.number;
4497 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4498 adapter->tx_rx_desc_req);
4499 break;
4500
4501 default:
4502 netdev_err(netdev, "Got invalid cap rsp %d\n",
4503 crq->query_capability.capability);
4504 }
4505
4506out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004507 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4508 adapter->wait_capability = false;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04004509 ibmvnic_send_req_caps(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004510 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004511}
4512
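/* Issue a QUERY_PHYS_PARMS CRQ and wait for the response; fw_lock
 * serializes users of the shared fw_done completion.
 */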
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004513static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4514{
4515 union ibmvnic_crq crq;
4516 int rc;
4517
4518 memset(&crq, 0, sizeof(crq));
4519 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4520 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004521
4522 mutex_lock(&adapter->fw_lock);
4523 adapter->fw_done_rc = 0;
Thomas Falcon070eca92019-11-25 17:12:53 -06004524 reinit_completion(&adapter->fw_done);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004525
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004526 rc = ibmvnic_send_crq(adapter, &crq);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004527 if (rc) {
4528 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004529 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004530 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004531
4532 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
Thomas Falconff25dcb2019-11-25 17:12:56 -06004533 if (rc) {
4534 mutex_unlock(&adapter->fw_lock);
Thomas Falcon476d96c2019-11-25 17:12:55 -06004535 return rc;
Thomas Falconff25dcb2019-11-25 17:12:56 -06004536 }
Thomas Falcon476d96c2019-11-25 17:12:55 -06004537
Thomas Falconff25dcb2019-11-25 17:12:56 -06004538 mutex_unlock(&adapter->fw_lock);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004539 return adapter->fw_done_rc ? -EIO : 0;
4540}
4541
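/* Translate the QUERY_PHYS_PARMS response into the ethtool speed and
 * duplex values cached on the adapter.
 */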
4542static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4543 struct ibmvnic_adapter *adapter)
4544{
4545 struct net_device *netdev = adapter->netdev;
4546 int rc;
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004547	u32 rspeed = be32_to_cpu(crq->query_phys_parms_rsp.speed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004548
4549 rc = crq->query_phys_parms_rsp.rc.code;
4550 if (rc) {
4551 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4552 return rc;
4553 }
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004554 switch (rspeed) {
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004555 case IBMVNIC_10MBPS:
4556 adapter->speed = SPEED_10;
4557 break;
4558 case IBMVNIC_100MBPS:
4559 adapter->speed = SPEED_100;
4560 break;
4561 case IBMVNIC_1GBPS:
4562 adapter->speed = SPEED_1000;
4563 break;
4564 case IBMVNIC_10GBP:
4565 adapter->speed = SPEED_10000;
4566 break;
4567 case IBMVNIC_25GBPS:
4568 adapter->speed = SPEED_25000;
4569 break;
4570 case IBMVNIC_40GBPS:
4571 adapter->speed = SPEED_40000;
4572 break;
4573 case IBMVNIC_50GBPS:
4574 adapter->speed = SPEED_50000;
4575 break;
4576 case IBMVNIC_100GBPS:
4577 adapter->speed = SPEED_100000;
4578 break;
4579 default:
Murilo Fossa Vicentinidd0f9d82019-09-16 11:50:37 -03004580 if (netif_carrier_ok(netdev))
4581 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004582 adapter->speed = SPEED_UNKNOWN;
4583 }
4584 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4585 adapter->duplex = DUPLEX_FULL;
4586 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4587 adapter->duplex = DUPLEX_HALF;
4588 else
4589 adapter->duplex = DUPLEX_UNKNOWN;
4590
4591 return rc;
4592}
4593
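/* Main CRQ dispatch. Transport events (partner initialization,
 * failover, migration) are handled first; everything else is a
 * command response routed to its handler.
 */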
Thomas Falcon032c5e82015-12-21 11:26:06 -06004594static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4595 struct ibmvnic_adapter *adapter)
4596{
4597 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4598 struct net_device *netdev = adapter->netdev;
4599 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004600 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004601 long rc;
4602
4603 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004604		   (unsigned long)cpu_to_be64(u64_crq[0]),
4605		   (unsigned long)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004606 switch (gen_crq->first) {
4607 case IBMVNIC_CRQ_INIT_RSP:
4608 switch (gen_crq->cmd) {
4609 case IBMVNIC_CRQ_INIT:
4610 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04004611 adapter->from_passive_init = true;
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004612 adapter->failover_pending = false;
Thomas Falcon17c87052018-05-23 13:37:58 -05004613 if (!completion_done(&adapter->init_done)) {
4614 complete(&adapter->init_done);
4615 adapter->init_done_rc = -EIO;
4616 }
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004617 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004618 break;
4619 case IBMVNIC_CRQ_INIT_COMPLETE:
4620 dev_info(dev, "Partner initialization complete\n");
Thomas Falcon51536982018-05-23 13:37:56 -05004621 adapter->crq.active = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004622 send_version_xchg(adapter);
4623 break;
4624 default:
4625 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4626 }
4627 return;
4628 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04004629 netif_carrier_off(netdev);
Thomas Falcon51536982018-05-23 13:37:56 -05004630 adapter->crq.active = false;
Thomas Falcon2147e3d2019-11-25 17:12:54 -06004631 /* terminate any thread waiting for a response
4632 * from the device
4633 */
4634 if (!completion_done(&adapter->fw_done)) {
4635 adapter->fw_done_rc = -EIO;
4636 complete(&adapter->fw_done);
4637 }
4638 if (!completion_done(&adapter->stats_done))
4639 complete(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04004640 if (test_bit(0, &adapter->resetting))
Thomas Falcon2770a792018-05-23 13:38:02 -05004641 adapter->force_reset_recovery = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004642 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04004643 dev_info(dev, "Migrated, re-enabling adapter\n");
4644 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05004645 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4646 dev_info(dev, "Backing device failover detected\n");
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05004647 adapter->failover_pending = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004648 } else {
4649 /* The adapter lost the connection */
4650 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4651 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04004652 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004653 }
4654 return;
4655 case IBMVNIC_CRQ_CMD_RSP:
4656 break;
4657 default:
4658 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4659 gen_crq->first);
4660 return;
4661 }
4662
4663 switch (gen_crq->cmd) {
4664 case VERSION_EXCHANGE_RSP:
4665 rc = crq->version_exchange_rsp.rc.code;
4666 if (rc) {
4667 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4668 break;
4669 }
4670 dev_info(dev, "Partner protocol version is %d\n",
4671 crq->version_exchange_rsp.version);
4672 if (be16_to_cpu(crq->version_exchange_rsp.version) <
4673 ibmvnic_version)
4674 ibmvnic_version =
4675 be16_to_cpu(crq->version_exchange_rsp.version);
4676 send_cap_queries(adapter);
4677 break;
4678 case QUERY_CAPABILITY_RSP:
4679 handle_query_cap_rsp(crq, adapter);
4680 break;
4681 case QUERY_MAP_RSP:
4682 handle_query_map_rsp(crq, adapter);
4683 break;
4684 case REQUEST_MAP_RSP:
Thomas Falconf3be0cb2017-06-21 14:53:01 -05004685 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4686 complete(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004687 break;
4688 case REQUEST_UNMAP_RSP:
4689 handle_request_unmap_rsp(crq, adapter);
4690 break;
4691 case REQUEST_CAPABILITY_RSP:
4692 handle_request_cap_rsp(crq, adapter);
4693 break;
4694 case LOGIN_RSP:
4695 netdev_dbg(netdev, "Got Login Response\n");
4696 handle_login_rsp(crq, adapter);
4697 break;
4698 case LOGICAL_LINK_STATE_RSP:
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004699 netdev_dbg(netdev,
4700 "Got Logical Link State Response, state: %d rc: %d\n",
4701 crq->logical_link_state_rsp.link_state,
4702 crq->logical_link_state_rsp.rc.code);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004703 adapter->logical_link_state =
4704 crq->logical_link_state_rsp.link_state;
Nathan Fontenot53da09e2017-04-21 15:39:04 -04004705 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4706 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004707 break;
4708 case LINK_STATE_INDICATION:
4709 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4710 adapter->phys_link_state =
4711 crq->link_state_indication.phys_link_state;
4712 adapter->logical_link_state =
4713 crq->link_state_indication.logical_link_state;
Thomas Falcon0655f992019-05-09 23:13:44 -05004714 if (adapter->phys_link_state && adapter->logical_link_state)
4715 netif_carrier_on(netdev);
4716 else
4717 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004718 break;
4719 case CHANGE_MAC_ADDR_RSP:
4720 netdev_dbg(netdev, "Got MAC address change Response\n");
Thomas Falconf8136142018-01-29 13:45:05 -06004721 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004722 break;
4723 case ERROR_INDICATION:
4724 netdev_dbg(netdev, "Got Error Indication\n");
4725 handle_error_indication(crq, adapter);
4726 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004727 case REQUEST_STATISTICS_RSP:
4728 netdev_dbg(netdev, "Got Statistics Response\n");
4729 complete(&adapter->stats_done);
4730 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004731 case QUERY_IP_OFFLOAD_RSP:
4732 netdev_dbg(netdev, "Got Query IP offload Response\n");
4733 handle_query_ip_offload_rsp(adapter);
4734 break;
4735 case MULTICAST_CTRL_RSP:
4736 netdev_dbg(netdev, "Got multicast control Response\n");
4737 break;
4738 case CONTROL_IP_OFFLOAD_RSP:
4739 netdev_dbg(netdev, "Got Control IP offload Response\n");
4740 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4741 sizeof(adapter->ip_offload_ctrl),
4742 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05004743 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004744 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004745 case COLLECT_FW_TRACE_RSP:
4746 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4747 complete(&adapter->fw_done);
4748 break;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02004749 case GET_VPD_SIZE_RSP:
4750 handle_vpd_size_rsp(crq, adapter);
4751 break;
4752 case GET_VPD_RSP:
4753 handle_vpd_rsp(crq, adapter);
4754 break;
Murilo Fossa Vicentinif8d6ae02019-03-19 10:28:51 -03004755 case QUERY_PHYS_PARMS_RSP:
4756 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4757 complete(&adapter->fw_done);
4758 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004759 default:
4760 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4761 gen_crq->cmd);
4762 }
4763}
4764
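/* CRQ interrupt handler: defer all processing to the tasklet. */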
4765static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4766{
4767 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06004768
Thomas Falcon6c267b32017-02-15 12:17:58 -06004769 tasklet_schedule(&adapter->tasklet);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004770 return IRQ_HANDLED;
4771}
4772
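/* Tasklet body: drain and dispatch all pending CRQ messages under the
 * queue lock.
 */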
4773static void ibmvnic_tasklet(unsigned long data)
4774{
4775	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004776 struct ibmvnic_crq_queue *queue = &adapter->crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004777 union ibmvnic_crq *crq;
4778 unsigned long flags;
4779 bool done = false;
4780
4781 spin_lock_irqsave(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004782 while (!done) {
4783 /* Pull all the valid messages off the CRQ */
4784 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4785 ibmvnic_handle_crq(crq, adapter);
4786 crq->generic.first = 0;
4787 }
Brian Kinged7ecbf2017-04-19 13:44:53 -04004788
4789 /* remain in tasklet until all
4790		 * capability responses are received
4791 */
4792 if (!adapter->wait_capability)
4793 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004794 }
Thomas Falcon249168a2017-02-15 12:18:00 -06004795	/* if capability CRQs were sent in this tasklet, the next tasklet
4796	 * run must wait until all of their responses are received
4797 */
4798 if (atomic_read(&adapter->running_cap_crqs) != 0)
4799 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004800 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004801}
4802
4803static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4804{
4805 struct vio_dev *vdev = adapter->vdev;
4806 int rc;
4807
4808 do {
4809 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4810 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4811
4812 if (rc)
4813 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4814
4815 return rc;
4816}
4817
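/* Free the CRQ from the hypervisor, clear the local message page, and
 * register it again, e.g. to recover from a transport event.
 */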
4818static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4819{
4820 struct ibmvnic_crq_queue *crq = &adapter->crq;
4821 struct device *dev = &adapter->vdev->dev;
4822 struct vio_dev *vdev = adapter->vdev;
4823 int rc;
4824
4825 /* Close the CRQ */
4826 do {
4827 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4828 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4829
4830 /* Clean out the queue */
4831 memset(crq->msgs, 0, PAGE_SIZE);
4832 crq->cur = 0;
Thomas Falcon51536982018-05-23 13:37:56 -05004833 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004834
4835	/* And re-open it */
4836 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4837 crq->msg_token, PAGE_SIZE);
4838
4839 if (rc == H_CLOSED)
4840 /* Adapter is good, but other end is not ready */
4841 dev_warn(dev, "Partner adapter not ready\n");
4842 else if (rc != 0)
4843 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4844
4845 return rc;
4846}
4847
Nathan Fontenotf9928872017-03-30 02:48:54 -04004848static void release_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004849{
4850 struct ibmvnic_crq_queue *crq = &adapter->crq;
4851 struct vio_dev *vdev = adapter->vdev;
4852 long rc;
4853
Nathan Fontenotf9928872017-03-30 02:48:54 -04004854 if (!crq->msgs)
4855 return;
4856
Thomas Falcon032c5e82015-12-21 11:26:06 -06004857 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4858 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06004859 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004860 do {
4861 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4862 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4863
4864 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4865 DMA_BIDIRECTIONAL);
4866 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04004867 crq->msgs = NULL;
Thomas Falcon51536982018-05-23 13:37:56 -05004868 crq->active = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004869}
4870
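/* Allocate and DMA-map a page of CRQ messages, register the queue with
 * the hypervisor, and wire up the interrupt and tasklet. Returns early
 * if the queue already exists.
 */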
Nathan Fontenotf9928872017-03-30 02:48:54 -04004871static int init_crq_queue(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06004872{
4873 struct ibmvnic_crq_queue *crq = &adapter->crq;
4874 struct device *dev = &adapter->vdev->dev;
4875 struct vio_dev *vdev = adapter->vdev;
4876 int rc, retrc = -ENOMEM;
4877
Nathan Fontenotf9928872017-03-30 02:48:54 -04004878 if (crq->msgs)
4879 return 0;
4880
Thomas Falcon032c5e82015-12-21 11:26:06 -06004881 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4882 /* Should we allocate more than one page? */
4883
4884 if (!crq->msgs)
4885 return -ENOMEM;
4886
4887 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4888 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4889 DMA_BIDIRECTIONAL);
4890 if (dma_mapping_error(dev, crq->msg_token))
4891 goto map_failed;
4892
4893 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4894 crq->msg_token, PAGE_SIZE);
4895
4896 if (rc == H_RESOURCE)
4897 /* maybe kexecing and resource is busy. try a reset */
4898 rc = ibmvnic_reset_crq(adapter);
4899 retrc = rc;
4900
4901 if (rc == H_CLOSED) {
4902 dev_warn(dev, "Partner adapter not ready\n");
4903 } else if (rc) {
4904 dev_warn(dev, "Error %d opening adapter\n", rc);
4905 goto reg_crq_failed;
4906 }
4907
4908 retrc = 0;
4909
Thomas Falcon6c267b32017-02-15 12:17:58 -06004910	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
4911 (unsigned long)adapter);
4912
Thomas Falcon032c5e82015-12-21 11:26:06 -06004913 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
Murilo Fossa Vicentinie56e2512019-04-25 11:02:33 -03004914 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
4915 adapter->vdev->unit_address);
4916 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004917 if (rc) {
4918 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4919 vdev->irq, rc);
4920 goto req_irq_failed;
4921 }
4922
4923 rc = vio_enable_interrupts(vdev);
4924 if (rc) {
4925 dev_err(dev, "Error %d enabling interrupts\n", rc);
4926 goto req_irq_failed;
4927 }
4928
4929 crq->cur = 0;
4930 spin_lock_init(&crq->lock);
4931
4932 return retrc;
4933
4934req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06004935 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004936 do {
4937 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4938 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4939reg_crq_failed:
4940 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4941map_failed:
4942 free_page((unsigned long)crq->msgs);
Nathan Fontenotf9928872017-03-30 02:48:54 -04004943 crq->msgs = NULL;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004944 return retrc;
4945}
4946
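/* Re-initialize the adapter during a reset: redo the CRQ init
 * handshake, then rebuild the sub-CRQs, releasing and reallocating
 * them if the negotiated queue counts changed.
 */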
Thomas Falcon8a348452018-05-23 13:38:00 -05004947static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
John Allenf6ef6402017-03-17 17:13:42 -05004948{
4949 struct device *dev = &adapter->vdev->dev;
4950 unsigned long timeout = msecs_to_jiffies(30000);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06004951 u64 old_num_rx_queues, old_num_tx_queues;
John Allenf6ef6402017-03-17 17:13:42 -05004952 int rc;
4953
John Allen017892c12017-05-26 10:30:19 -04004954 adapter->from_passive_init = false;
4955
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06004956 old_num_rx_queues = adapter->req_rx_queues;
4957 old_num_tx_queues = adapter->req_tx_queues;
4958
Thomas Falconbbd669a2019-04-04 18:58:26 -05004959 reinit_completion(&adapter->init_done);
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04004960 adapter->init_done_rc = 0;
John Allenf6ef6402017-03-17 17:13:42 -05004961 ibmvnic_send_crq_init(adapter);
4962 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4963 dev_err(dev, "Initialization sequence timed out\n");
John Allen017892c12017-05-26 10:30:19 -04004964 return -1;
4965 }
4966
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04004967 if (adapter->init_done_rc) {
4968 release_crq_queue(adapter);
4969 return adapter->init_done_rc;
4970 }
4971
John Allen017892c12017-05-26 10:30:19 -04004972 if (adapter->from_passive_init) {
4973 adapter->state = VNIC_OPEN;
4974 adapter->from_passive_init = false;
John Allenf6ef6402017-03-17 17:13:42 -05004975 return -1;
4976 }
4977
Juliet Kim7ed5b312019-09-20 16:11:23 -04004978 if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
Nathan Fontenot30f79622018-04-06 18:37:06 -05004979 adapter->reset_reason != VNIC_RESET_MOBILITY) {
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06004980 if (adapter->req_rx_queues != old_num_rx_queues ||
4981 adapter->req_tx_queues != old_num_tx_queues) {
4982 release_sub_crqs(adapter, 0);
4983 rc = init_sub_crqs(adapter);
4984 } else {
4985 rc = reset_sub_crq_queues(adapter);
4986 }
4987 } else {
Nathan Fontenot57a49432017-05-26 10:31:12 -04004988 rc = init_sub_crqs(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06004989 }
4990
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04004991 if (rc) {
4992 dev_err(dev, "Initialization of sub crqs failed\n");
4993 release_crq_queue(adapter);
Thomas Falcon5df969c2017-06-28 19:55:54 -05004994 return rc;
4995 }
4996
4997 rc = init_sub_crq_irqs(adapter);
4998 if (rc) {
4999 dev_err(dev, "Failed to initialize sub crq irqs\n");
5000 release_crq_queue(adapter);
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04005001 }
5002
5003 return rc;
John Allenf6ef6402017-03-17 17:13:42 -05005004}
5005
Thomas Falcon8a348452018-05-23 13:38:00 -05005006static int ibmvnic_init(struct ibmvnic_adapter *adapter)
5007{
5008 struct device *dev = &adapter->vdev->dev;
5009 unsigned long timeout = msecs_to_jiffies(30000);
5010 int rc;
5011
5012 adapter->from_passive_init = false;
5013
Thomas Falcon8a348452018-05-23 13:38:00 -05005014 adapter->init_done_rc = 0;
5015 ibmvnic_send_crq_init(adapter);
5016 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5017 dev_err(dev, "Initialization sequence timed out\n");
5018 return -1;
5019 }
5020
5021 if (adapter->init_done_rc) {
5022 release_crq_queue(adapter);
5023 return adapter->init_done_rc;
5024 }
5025
5026 if (adapter->from_passive_init) {
5027 adapter->state = VNIC_OPEN;
5028 adapter->from_passive_init = false;
5029 return -1;
5030 }
5031
5032 rc = init_sub_crqs(adapter);
5033 if (rc) {
5034 dev_err(dev, "Initialization of sub crqs failed\n");
5035 release_crq_queue(adapter);
5036 return rc;
5037 }
5038
5039 rc = init_sub_crq_irqs(adapter);
5040 if (rc) {
5041 dev_err(dev, "Failed to initialize sub crq irqs\n");
5042 release_crq_queue(adapter);
5043 }
5044
5045 return rc;
5046}
5047
Thomas Falcon40c9db82017-06-12 12:35:04 -05005048static struct device_attribute dev_attr_failover;
5049
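/* Probe a vNIC device: read the MAC address from the device tree,
 * allocate the netdev, bring up the CRQ and negotiate capabilities
 * (retrying while the server reports EAGAIN), then register the
 * netdev and the failover sysfs attribute.
 */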
Thomas Falcon032c5e82015-12-21 11:26:06 -06005050static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5051{
5052 struct ibmvnic_adapter *adapter;
5053 struct net_device *netdev;
5054 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005055 int rc;
5056
5057 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
5058 dev->unit_address);
5059
5060 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
5061 VETH_MAC_ADDR, NULL);
5062 if (!mac_addr_p) {
5063 dev_err(&dev->dev,
5064 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
5065 __FILE__, __LINE__);
5066 return 0;
5067 }
5068
5069 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
Thomas Falcond45cc3a2017-12-18 12:52:11 -06005070 IBMVNIC_MAX_QUEUES);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005071 if (!netdev)
5072 return -ENOMEM;
5073
5074 adapter = netdev_priv(netdev);
Nathan Fontenot90c80142017-05-03 14:04:32 -04005075 adapter->state = VNIC_PROBING;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005076 dev_set_drvdata(&dev->dev, netdev);
5077 adapter->vdev = dev;
5078 adapter->netdev = netdev;
5079
5080 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5081 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5082 netdev->irq = dev->irq;
5083 netdev->netdev_ops = &ibmvnic_netdev_ops;
5084 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5085 SET_NETDEV_DEV(netdev, &dev->dev);
5086
5087 spin_lock_init(&adapter->stats_lock);
5088
Nathan Fontenoted651a12017-05-03 14:04:38 -04005089 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005090 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5091 __ibmvnic_delayed_reset);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005092 INIT_LIST_HEAD(&adapter->rwi_list);
Thomas Falcon6c5c7482018-12-10 15:22:22 -06005093 spin_lock_init(&adapter->rwi_lock);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005094 mutex_init(&adapter->fw_lock);
Thomas Falconbbd669a2019-04-04 18:58:26 -05005095 init_completion(&adapter->init_done);
Thomas Falcon070eca92019-11-25 17:12:53 -06005096 init_completion(&adapter->fw_done);
5097 init_completion(&adapter->reset_done);
5098 init_completion(&adapter->stats_done);
Juliet Kim7ed5b312019-09-20 16:11:23 -04005099 clear_bit(0, &adapter->resetting);
Nathan Fontenoted651a12017-05-03 14:04:38 -04005100
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005101 do {
Nathan Fontenot30f79622018-04-06 18:37:06 -05005102 rc = init_crq_queue(adapter);
5103 if (rc) {
5104 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5105 rc);
5106 goto ibmvnic_init_fail;
5107 }
5108
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005109 rc = ibmvnic_init(adapter);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005110 if (rc && rc != EAGAIN)
5111 goto ibmvnic_init_fail;
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04005112 } while (rc == EAGAIN);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005113
Thomas Falcon07184212018-05-16 15:49:05 -05005114 rc = init_stats_buffers(adapter);
5115 if (rc)
5116 goto ibmvnic_init_fail;
5117
5118 rc = init_stats_token(adapter);
5119 if (rc)
5120 goto ibmvnic_stats_fail;
5121
Thomas Falconf39f0d12017-02-14 10:22:59 -06005122 netdev->mtu = adapter->req_mtu - ETH_HLEN;
John Allenc26eba02017-10-26 16:23:25 -05005123 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5124 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005125
Thomas Falcon40c9db82017-06-12 12:35:04 -05005126 rc = device_create_file(&dev->dev, &dev_attr_failover);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005127 if (rc)
Thomas Falcon07184212018-05-16 15:49:05 -05005128 goto ibmvnic_dev_file_err;
Thomas Falcon40c9db82017-06-12 12:35:04 -05005129
Mick Tarsele876a8a2017-09-28 13:53:18 -07005130 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005131 rc = register_netdev(netdev);
5132 if (rc) {
5133 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005134 goto ibmvnic_register_fail;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005135 }
5136 dev_info(&dev->dev, "ibmvnic registered\n");
5137
Nathan Fontenot90c80142017-05-03 14:04:32 -04005138 adapter->state = VNIC_PROBED;
John Allenc26eba02017-10-26 16:23:25 -05005139
5140 adapter->wait_for_reset = false;
5141
Thomas Falcon032c5e82015-12-21 11:26:06 -06005142 return 0;
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005143
5144ibmvnic_register_fail:
5145 device_remove_file(&dev->dev, &dev_attr_failover);
5146
Thomas Falcon07184212018-05-16 15:49:05 -05005147ibmvnic_dev_file_err:
5148 release_stats_token(adapter);
5149
5150ibmvnic_stats_fail:
5151 release_stats_buffers(adapter);
5152
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005153ibmvnic_init_fail:
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005154 release_sub_crqs(adapter, 1);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005155 release_crq_queue(adapter);
Thomas Falconff25dcb2019-11-25 17:12:56 -06005156 mutex_destroy(&adapter->fw_lock);
Nathan Fontenot7c1885a2017-08-08 14:28:45 -05005157 free_netdev(netdev);
5158
5159 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06005160}
5161
5162static int ibmvnic_remove(struct vio_dev *dev)
5163{
5164 struct net_device *netdev = dev_get_drvdata(&dev->dev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005165 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005166
Nathan Fontenot90c80142017-05-03 14:04:32 -04005167 adapter->state = VNIC_REMOVING;
Juliet Kima5681e22018-11-19 15:59:22 -06005168 rtnl_lock();
5169 unregister_netdevice(netdev);
Nathan Fontenot37489052017-04-19 13:45:04 -04005170
5171 release_resources(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06005172 release_sub_crqs(adapter, 1);
Nathan Fontenot37489052017-04-19 13:45:04 -04005173 release_crq_queue(adapter);
5174
Thomas Falcon53cc7722018-02-26 18:10:56 -06005175 release_stats_token(adapter);
5176 release_stats_buffers(adapter);
5177
Nathan Fontenot90c80142017-05-03 14:04:32 -04005178 adapter->state = VNIC_REMOVED;
5179
Juliet Kima5681e22018-11-19 15:59:22 -06005180 rtnl_unlock();
Thomas Falconff25dcb2019-11-25 17:12:56 -06005181 mutex_destroy(&adapter->fw_lock);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005182 device_remove_file(&dev->dev, &dev_attr_failover);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005183 free_netdev(netdev);
5184 dev_set_drvdata(&dev->dev, NULL);
5185
5186 return 0;
5187}
5188
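/* sysfs "failover" attribute: writing "1" fetches the session token
 * via H_GET_SESSION_TOKEN and signals H_SESSION_ERR_DETECTED so the
 * hypervisor fails over to the backup device. For example (the unit
 * address below is illustrative; it varies per device):
 *
 *   echo 1 > /sys/devices/vio/30000002/failover
 */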
Thomas Falcon40c9db82017-06-12 12:35:04 -05005189static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5190 const char *buf, size_t count)
5191{
5192 struct net_device *netdev = dev_get_drvdata(dev);
5193 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5194 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5195 __be64 session_token;
5196 long rc;
5197
5198 if (!sysfs_streq(buf, "1"))
5199 return -EINVAL;
5200
5201 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5202 H_GET_SESSION_TOKEN, 0, 0, 0);
5203 if (rc) {
5204 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5205 rc);
5206 return -EINVAL;
5207 }
5208
5209 session_token = (__be64)retbuf[0];
5210 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5211 be64_to_cpu(session_token));
5212 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5213 H_SESSION_ERR_DETECTED, session_token, 0, 0);
5214 if (rc) {
5215 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5216 rc);
5217 return -EINVAL;
5218 }
5219
5220 return count;
5221}
5222
Joe Perches6cbaefb2017-12-19 10:15:09 -08005223static DEVICE_ATTR_WO(failover);
Thomas Falcon40c9db82017-06-12 12:35:04 -05005224
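/* Estimate the IO entitlement this device needs: one page for the CRQ,
 * the statistics buffer, four pages per sub-CRQ, and the long-term
 * mapped buffers of every rx pool.
 */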
Thomas Falcon032c5e82015-12-21 11:26:06 -06005225static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5226{
5227 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5228 struct ibmvnic_adapter *adapter;
5229 struct iommu_table *tbl;
5230 unsigned long ret = 0;
5231 int i;
5232
5233 tbl = get_iommu_table_base(&vdev->dev);
5234
5235	/* netdev inits at probe time along with the structures we need below */
5236 if (!netdev)
5237 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5238
5239 adapter = netdev_priv(netdev);
5240
5241 ret += PAGE_SIZE; /* the crq message queue */
Thomas Falcon032c5e82015-12-21 11:26:06 -06005242 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5243
5244 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5245 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5246
5247 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5248 i++)
5249 ret += adapter->rx_pool[i].size *
5250 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5251
5252 return ret;
5253}
5254
5255static int ibmvnic_resume(struct device *dev)
5256{
5257 struct net_device *netdev = dev_get_drvdata(dev);
5258 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005259
John Allencb89ba22017-06-19 11:27:53 -05005260 if (adapter->state != VNIC_OPEN)
5261 return 0;
5262
John Allena2488782017-07-24 13:26:06 -05005263 tasklet_schedule(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06005264
5265 return 0;
5266}
5267
Arvind Yadav8c37bc62017-08-17 18:52:54 +05305268static const struct vio_device_id ibmvnic_device_table[] = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06005269 {"network", "IBM,vnic"},
5270 {"", "" }
5271};
5272MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5273
5274static const struct dev_pm_ops ibmvnic_pm_ops = {
5275 .resume = ibmvnic_resume
5276};
5277
5278static struct vio_driver ibmvnic_driver = {
5279 .id_table = ibmvnic_device_table,
5280 .probe = ibmvnic_probe,
5281 .remove = ibmvnic_remove,
5282 .get_desired_dma = ibmvnic_get_desired_dma,
5283 .name = ibmvnic_driver_name,
5284 .pm = &ibmvnic_pm_ops,
5285};
5286
5287/* module functions */
5288static int __init ibmvnic_module_init(void)
5289{
5290 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5291 IBMVNIC_DRIVER_VERSION);
5292
5293 return vio_register_driver(&ibmvnic_driver);
5294}
5295
5296static void __exit ibmvnic_module_exit(void)
5297{
5298 vio_unregister_driver(&ibmvnic_driver);
5299}
5300
5301module_init(ibmvnic_module_init);
5302module_exit(ibmvnic_module_exit);