// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server*/
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/
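
/* A rough sketch of the long term buffer lifecycle as implemented by the
 * helpers below (illustrative only, not an additional API):
 *
 *	alloc_long_term_buff(adapter, &ltb, size);
 *		- dma_alloc_coherent() the buffer, then register it with
 *		  the server via a REQUEST_MAP command
 *	...	ltb->buff is then reused for many skbs ...
 *	free_long_term_buff(adapter, &ltb);
 *		- send REQUEST_UNMAP (skipped on failover/mobility resets)
 *		  and dma_free_coherent() the buffer
 */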

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

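/* Wrapper for the H_REG_SUB_CRQ hcall: registers the sub-CRQ page at
 * @token/@length with the hypervisor and, on success, returns the new
 * queue's number and virtual IRQ through @number and @irq (retbuf[0]
 * and retbuf[1] of the hcall).
 */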
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

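/* Allocate a DMA-coherent buffer of @size bytes and register it with the
 * VNIC server via REQUEST_MAP, sleeping on the fw_done completion until
 * the server responds. If firmware reports a mapping error, the buffer
 * is freed and -1 is returned.
 */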
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

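/* Unregister and free a long term buffer. The REQUEST_UNMAP is skipped
 * for failover and mobility resets, where the old mapping is presumably
 * no longer known to the (new) server.
 */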
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

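/* Zero a long term buffer and re-register it with the server under the
 * same map ID. If the remap fails, fall back to freeing and reallocating
 * the buffer from scratch.
 */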
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

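/* Post free receive buffers from @pool to the VNIC server. Each buffer's
 * data area lives in the pool's long term buffer and is advertised with an
 * rx_add sub-CRQ descriptor. On H_CLOSED, or while a failover is pending,
 * replenishment is disabled and carrier is turned off until the expected
 * reset recovers the device.
 */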
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

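/* Reset all RX pools after capability renegotiation: remap each pool's
 * long term buffer (or reallocate it if the server now advertises a
 * different buffer size) and return the pool bookkeeping to its initial
 * state.
 */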
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

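/* Allocate one RX pool per RX-add sub-CRQ, sizing each buffer from the
 * size array supplied in the login response, and back every pool with a
 * freshly mapped long term buffer.
 */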
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

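/* Remap a single TX pool's long term buffer and reset its free map and
 * producer/consumer indices; used for both the standard and TSO pools.
 */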
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

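/* Allocate the buffer tracking array, long term buffer and free map for
 * one TX pool of @num_entries buffers of @buf_size bytes each.
 */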
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

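/* Log in to the VNIC server. A PARTIALSUCCESS response means the server
 * wants to renegotiate capabilities: the sub-CRQs are released, the
 * capability queries re-issued and the login retried, up to
 * IBMVNIC_MAX_QUEUES attempts.
 */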
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

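/* Send a LOGICAL_LINK_STATE command and wait for the server's reply. A
 * response code of 1 indicates partial success; the same request is
 * re-sent after a one second delay.
 */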
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

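/* Retrieve the adapter's Vital Product Data: first ask the server for the
 * VPD size, then DMA-map a buffer of that size and ask the server to fill
 * it in. Both steps block on the fw_done completion.
 */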
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

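/* Bring the interface up: replenish the RX pools, enable napi and the
 * sub-CRQ interrupts, and ask the server to set the logical link up.
 * Hard IRQs are re-enabled only when coming out of the VNIC_CLOSED
 * state, matching the disable path in ibmvnic_disable_irqs().
 */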
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

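/* Quiesce the device: stop the TX queues (netif_tx_disable() when called
 * from the reset path), disable napi and the sub-CRQ interrupts, and drop
 * any skbs still held in the RX/TX pools.
 */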
1204static void ibmvnic_cleanup(struct net_device *netdev)
1205{
1206 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1207
1208 /* ensure that transmissions are stopped if called by do_reset */
1209 if (adapter->resetting)
1210 netif_tx_disable(netdev);
1211 else
1212 netif_tx_stop_all_queues(netdev);
1213
1214 ibmvnic_napi_disable(adapter);
1215 ibmvnic_disable_irqs(adapter);
1216
Thomas Falcond0869c02018-02-13 18:23:43 -06001217 clean_rx_pools(adapter);
Thomas Falcon10f76212017-05-26 10:30:31 -04001218 clean_tx_pools(adapter);
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001219}
1220
1221static int __ibmvnic_close(struct net_device *netdev)
1222{
1223 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1224 int rc = 0;
1225
1226 adapter->state = VNIC_CLOSING;
1227 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1228 if (rc)
1229 return rc;
Nathan Fontenot90c80142017-05-03 14:04:32 -04001230 adapter->state = VNIC_CLOSED;
Thomas Falcon01d9bd72018-03-07 17:51:46 -06001231 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001232}
1233
Nathan Fontenoted651a12017-05-03 14:04:38 -04001234static int ibmvnic_close(struct net_device *netdev)
1235{
1236 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1237 int rc;
1238
Thomas Falcon5a18e1e2018-04-06 18:37:05 -05001239 /* If device failover is pending, just set device state and return.
1240 * Device operation will be handled by reset routine.
1241 */
1242 if (adapter->failover_pending) {
1243 adapter->state = VNIC_CLOSED;
1244 return 0;
1245 }
1246
Nathan Fontenoted651a12017-05-03 14:04:38 -04001247 rc = __ibmvnic_close(netdev);
Nathan Fontenot30f79622018-04-06 18:37:06 -05001248 ibmvnic_cleanup(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001249
1250 return rc;
1251}
1252
Thomas Falconad7775d2016-04-01 17:20:34 -05001253/**
1254 * build_hdr_data - creates L2/L3/L4 header data buffer
1255 * @hdr_field - bitfield determining needed headers
1256 * @skb - socket buffer
1257 * @hdr_len - array of header lengths
1258 * @tot_len - total length of data
1259 *
1260 * Reads hdr_field to determine which headers are needed by firmware.
1261 * Builds a buffer containing these headers. Saves individual header
1262 * lengths and total buffer length to be used to build descriptors.
1263 */
1264static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1265 int *hdr_len, u8 *hdr_data)
1266{
1267 int len = 0;
1268 u8 *hdr;
1269
Thomas Falconda75e3b2018-03-12 11:51:02 -05001270 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1271 hdr_len[0] = sizeof(struct vlan_ethhdr);
1272 else
1273 hdr_len[0] = sizeof(struct ethhdr);
Thomas Falconad7775d2016-04-01 17:20:34 -05001274
1275 if (skb->protocol == htons(ETH_P_IP)) {
1276 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1277 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1278 hdr_len[2] = tcp_hdrlen(skb);
1279 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1280 hdr_len[2] = sizeof(struct udphdr);
1281 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1282 hdr_len[1] = sizeof(struct ipv6hdr);
1283 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1284 hdr_len[2] = tcp_hdrlen(skb);
1285 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1286 hdr_len[2] = sizeof(struct udphdr);
Thomas Falcon4eb50ce2017-12-18 12:52:40 -06001287 } else if (skb->protocol == htons(ETH_P_ARP)) {
1288 hdr_len[1] = arp_hdr_len(skb->dev);
1289 hdr_len[2] = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001290 }
1291
1292 memset(hdr_data, 0, 120);
1293 if ((hdr_field >> 6) & 1) {
1294 hdr = skb_mac_header(skb);
1295 memcpy(hdr_data, hdr, hdr_len[0]);
1296 len += hdr_len[0];
1297 }
1298
1299 if ((hdr_field >> 5) & 1) {
1300 hdr = skb_network_header(skb);
1301 memcpy(hdr_data + len, hdr, hdr_len[1]);
1302 len += hdr_len[1];
1303 }
1304
1305 if ((hdr_field >> 4) & 1) {
1306 hdr = skb_transport_header(skb);
1307 memcpy(hdr_data + len, hdr, hdr_len[2]);
1308 len += hdr_len[2];
1309 }
1310 return len;
1311}
1312
1313/**
1314 * create_hdr_descs - create header and header extension descriptors
1315 * @hdr_field - bitfield determining needed headers
1316 * @data - buffer containing header data
1317 * @len - length of data buffer
1318 * @hdr_len - array of individual header lengths
1319 * @scrq_arr - descriptor array
1320 *
1321 * Creates header and, if needed, header extension descriptors and
1322 * places them in a descriptor array, scrq_arr
1323 */
1324
Thomas Falcon2de09682017-10-16 10:02:11 -05001325static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1326 union sub_crq *scrq_arr)
Thomas Falconad7775d2016-04-01 17:20:34 -05001327{
1328 union sub_crq hdr_desc;
1329 int tmp_len = len;
Thomas Falcon2de09682017-10-16 10:02:11 -05001330 int num_descs = 0;
Thomas Falconad7775d2016-04-01 17:20:34 -05001331 u8 *data, *cur;
1332 int tmp;
1333
1334 while (tmp_len > 0) {
1335 cur = hdr_data + len - tmp_len;
1336
1337 memset(&hdr_desc, 0, sizeof(hdr_desc));
1338 if (cur != hdr_data) {
1339 data = hdr_desc.hdr_ext.data;
1340 tmp = tmp_len > 29 ? 29 : tmp_len;
1341 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1342 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1343 hdr_desc.hdr_ext.len = tmp;
1344 } else {
1345 data = hdr_desc.hdr.data;
1346 tmp = tmp_len > 24 ? 24 : tmp_len;
1347 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1348 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1349 hdr_desc.hdr.len = tmp;
1350 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1351 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1352 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1353 hdr_desc.hdr.flag = hdr_field << 1;
1354 }
1355 memcpy(data, cur, tmp);
1356 tmp_len -= tmp;
1357 *scrq_arr = hdr_desc;
1358 scrq_arr++;
Thomas Falcon2de09682017-10-16 10:02:11 -05001359 num_descs++;
Thomas Falconad7775d2016-04-01 17:20:34 -05001360 }
Thomas Falcon2de09682017-10-16 10:02:11 -05001361
1362 return num_descs;
Thomas Falconad7775d2016-04-01 17:20:34 -05001363}
1364
1365/**
1366 * build_hdr_descs_arr - build a header descriptor array
1367 * @skb - socket buffer
1368 * @num_entries - number of descriptors to be sent
1369 * @subcrq - first TX descriptor
1370 * @hdr_field - bit field determining which headers will be sent
1371 *
1372 * This function will build a TX descriptor array with applicable
1373 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1374 */
1375
1376static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1377 int *num_entries, u8 hdr_field)
1378{
1379 int hdr_len[3] = {0, 0, 0};
Thomas Falcon2de09682017-10-16 10:02:11 -05001380 int tot_len;
Thomas Falconad7775d2016-04-01 17:20:34 -05001381 u8 *hdr_data = txbuff->hdr_data;
1382
1383 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1384 txbuff->hdr_data);
Thomas Falcon2de09682017-10-16 10:02:11 -05001385 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
Thomas Falconad7775d2016-04-01 17:20:34 -05001386 txbuff->indir_arr + 1);
1387}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
                                    struct net_device *netdev)
{
        /* For some backing devices, mishandling of small packets
         * can result in a loss of connection or TX stall. Device
         * architects recommend that no packet should be smaller
         * than the minimum MTU value provided to the driver, so
         * pad any packets to that length.
         */
        if (skb->len < netdev->min_mtu)
                return skb_put_padto(skb, netdev->min_mtu);

        return 0;
}
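
/* Note: skb_put_padto() consumes the skb on allocation failure and
 * returns a negative errno, so a non-zero return here means the skb is
 * already freed and the caller must only account the drop.
 */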

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_sub_crq_queue *tx_scrq;
        struct ibmvnic_tx_pool *tx_pool;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;
        unsigned int tx_dropped = 0;
        unsigned int tx_packets = 0;
        unsigned int tx_bytes = 0;
        dma_addr_t data_dma_addr;
        struct netdev_queue *txq;
        unsigned long lpar_rc;
        union sub_crq tx_crq;
        unsigned int offset;
        int num_entries = 1;
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
        u8 proto = 0;
        netdev_tx_t ret = NETDEV_TX_OK;

        if (adapter->resetting) {
                if (!netif_subqueue_stopped(netdev, skb))
                        netif_stop_subqueue(netdev, queue_num);
                dev_kfree_skb_any(skb);

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto out;
        }

        if (ibmvnic_xmit_workarounds(skb, netdev)) {
                tx_dropped++;
                tx_send_failed++;
                ret = NETDEV_TX_OK;
                goto out;
        }
        if (skb_is_gso(skb))
                tx_pool = &adapter->tso_pool[queue_num];
        else
                tx_pool = &adapter->tx_pool[queue_num];

        tx_scrq = adapter->tx_scrq[queue_num];
        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

        index = tx_pool->free_map[tx_pool->consumer_index];

        if (index == IBMVNIC_INVALID_MAP) {
                dev_kfree_skb_any(skb);
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto out;
        }

        tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

        offset = index * tx_pool->buf_size;
        dst = tx_pool->long_term_buff.buff + offset;
        memset(dst, 0, tx_pool->buf_size);
        data_dma_addr = tx_pool->long_term_buff.addr + offset;

        if (skb_shinfo(skb)->nr_frags) {
                int cur, i;

                /* Copy the head */
                skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
                cur = skb_headlen(skb);

                /* Copy the frags */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        memcpy(dst + cur,
                               page_address(skb_frag_page(frag)) +
                               frag->page_offset, skb_frag_size(frag));
                        cur += skb_frag_size(frag);
                }
        } else {
                skb_copy_from_linear_data(skb, dst, skb->len);
        }

        tx_pool->consumer_index =
            (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

        tx_buff = &tx_pool->tx_buff[index];
        tx_buff->skb = skb;
        tx_buff->data_dma[0] = data_dma_addr;
        tx_buff->data_len[0] = skb->len;
        tx_buff->index = index;
        tx_buff->pool_index = queue_num;
        tx_buff->last_frag = true;

        memset(&tx_crq, 0, sizeof(tx_crq));
        tx_crq.v1.first = IBMVNIC_CRQ_CMD;
        tx_crq.v1.type = IBMVNIC_TX_DESC;
        tx_crq.v1.n_crq_elem = 1;
        tx_crq.v1.n_sge = 1;
        tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

        if (skb_is_gso(skb))
                tx_crq.v1.correlator =
                        cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
        else
                tx_crq.v1.correlator = cpu_to_be32(index);
        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

        if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
                proto = ip_hdr(skb)->protocol;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
                proto = ipv6_hdr(skb)->nexthdr;
        }

        if (proto == IPPROTO_TCP)
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
        else if (proto == IPPROTO_UDP)
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
                hdrs += 2;
        }
        if (skb_is_gso(skb)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
                tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                hdrs += 2;
        }
        /* determine if l2/3/4 headers are sent to firmware */
        if ((*hdrs >> 7) & 1) {
                build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
                tx_crq.v1.n_crq_elem = num_entries;
                tx_buff->num_entries = num_entries;
                tx_buff->indir_arr[0] = tx_crq;
                tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
                                                    sizeof(tx_buff->indir_arr),
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, tx_buff->indir_dma)) {
                        dev_kfree_skb_any(skb);
                        tx_buff->skb = NULL;
                        if (!firmware_has_feature(FW_FEATURE_CMO))
                                dev_err(dev, "tx: unable to map descriptor array\n");
                        tx_map_failed++;
                        tx_dropped++;
                        ret = NETDEV_TX_OK;
                        goto tx_err_out;
                }
                lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
                                               (u64)tx_buff->indir_dma,
                                               (u64)num_entries);
        } else {
                tx_buff->num_entries = num_entries;
                lpar_rc = send_subcrq(adapter, handle_array[queue_num],
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
                if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
                        dev_err_ratelimited(dev, "tx: send failed\n");
                dev_kfree_skb_any(skb);
                tx_buff->skb = NULL;

                if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                        /* Disable TX and report carrier off if queue is closed
                         * or pending failover.
                         * Firmware guarantees that a signal will be sent to the
                         * driver, triggering a reset or some other action.
                         */
                        netif_tx_stop_all_queues(netdev);
                        netif_carrier_off(netdev);
                }

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto tx_err_out;
        }

        if (atomic_add_return(num_entries, &tx_scrq->used)
            >= adapter->req_tx_entries_per_subcrq) {
                netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
                netif_stop_subqueue(netdev, queue_num);
        }

        tx_packets++;
        tx_bytes += skb->len;
        txq->trans_start = jiffies;
        ret = NETDEV_TX_OK;
        goto out;

tx_err_out:
        /* roll back consumer index and map array */
        if (tx_pool->consumer_index == 0)
                tx_pool->consumer_index =
                        tx_pool->num_buffers - 1;
        else
                tx_pool->consumer_index--;
        tx_pool->free_map[tx_pool->consumer_index] = index;
out:
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes += tx_bytes;
        netdev->stats.tx_packets += tx_packets;
        adapter->tx_send_failed += tx_send_failed;
        adapter->tx_map_failed += tx_map_failed;
        adapter->tx_stats_buffers[queue_num].packets += tx_packets;
        adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
        adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

        return ret;
}
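
/* TX submission summary (illustrative): a single-descriptor frame goes
 * out via send_subcrq(); when L2/L3/L4 headers must accompany the frame
 * (checksum or TSO offload requested in tx_rx_desc_req), the base
 * descriptor plus header descriptors are DMA-mapped as one array and
 * posted with send_subcrq_indirect().
 */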

static void ibmvnic_set_multi(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.request_capability.first = IBMVNIC_CRQ_CMD;
        crq.request_capability.cmd = REQUEST_CAPABILITY;

        if (netdev->flags & IFF_PROMISC) {
                if (!adapter->promisc_supported)
                        return;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else if (netdev_mc_empty(netdev)) {
                        /* Reject all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else {
                        /* Accept one or more multicast(s) */
                        netdev_for_each_mc_addr(ha, netdev) {
                                memset(&crq, 0, sizeof(crq));
                                crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                                crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                                crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
                                ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
                                                ha->addr);
                                ibmvnic_send_crq(adapter, &crq);
                        }
                }
        }
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        union ibmvnic_crq crq;
        int rc;

        if (!is_valid_ether_addr(dev_addr)) {
                rc = -EADDRNOTAVAIL;
                goto err;
        }

        memset(&crq, 0, sizeof(crq));
        crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
        crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
        ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

        init_completion(&adapter->fw_done);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc) {
                rc = -EIO;
                goto err;
        }

        wait_for_completion(&adapter->fw_done);
        /* netdev->dev_addr is changed in handle_change_mac_rsp function */
        if (adapter->fw_done_rc) {
                rc = -EIO;
                goto err;
        }

        return 0;
err:
        ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
        return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int rc;

        rc = 0;
        ether_addr_copy(adapter->mac_addr, addr->sa_data);
        if (adapter->state != VNIC_PROBED)
                rc = __ibmvnic_set_mac(netdev, addr->sa_data);

        return rc;
}
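
/* While the adapter is still in VNIC_PROBED state, only the cached
 * adapter->mac_addr is updated above; the CRQ exchange with firmware is
 * deferred until the device is far enough along in initialization to
 * issue CHANGE_MAC_ADDR commands.
 */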

/**
 * do_reset - initiate and complete a driver reset
 * @adapter: ibmvnic adapter structure
 * @rwi: reset work item describing the reason for this reset
 * @reset_state: adapter state at the time the reset was scheduled
 *
 * Returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
                    struct ibmvnic_rwi *rwi, u32 reset_state)
{
        u64 old_num_rx_queues, old_num_tx_queues;
        u64 old_num_rx_slots, old_num_tx_slots;
        struct net_device *netdev = adapter->netdev;
        int i, rc;

        netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
                   rwi->reset_reason);

        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;

        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
        old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
        old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

        ibmvnic_cleanup(netdev);

        if (reset_state == VNIC_OPEN &&
            adapter->reset_reason != VNIC_RESET_MOBILITY &&
            adapter->reset_reason != VNIC_RESET_FAILOVER) {
                rc = __ibmvnic_close(netdev);
                if (rc)
                        return rc;
        }

        if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
            adapter->wait_for_reset) {
                release_resources(adapter);
                release_sub_crqs(adapter, 1);
                release_crq_queue(adapter);
        }

        if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
                /* remove the closed state so when we call open it appears
                 * we are coming from the probed state.
                 */
                adapter->state = VNIC_PROBED;

                if (adapter->wait_for_reset) {
                        rc = init_crq_queue(adapter);
                } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
                        rc = ibmvnic_reenable_crq_queue(adapter);
                        release_sub_crqs(adapter, 1);
                } else {
                        rc = ibmvnic_reset_crq(adapter);
                        if (!rc)
                                rc = vio_enable_interrupts(adapter->vdev);
                }

                if (rc) {
                        netdev_err(adapter->netdev,
                                   "Couldn't initialize crq. rc=%d\n", rc);
                        return rc;
                }

                rc = ibmvnic_reset_init(adapter);
                if (rc)
                        return IBMVNIC_INIT_FAILED;

                /* If the adapter was in PROBE state prior to the reset,
                 * exit here.
                 */
                if (reset_state == VNIC_PROBED)
                        return 0;

                rc = ibmvnic_login(netdev);
                if (rc) {
                        adapter->state = reset_state;
                        return rc;
                }

                if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
                    adapter->wait_for_reset) {
                        rc = init_resources(adapter);
                        if (rc)
                                return rc;
                } else if (adapter->req_rx_queues != old_num_rx_queues ||
                           adapter->req_tx_queues != old_num_tx_queues ||
                           adapter->req_rx_add_entries_per_subcrq !=
                           old_num_rx_slots ||
                           adapter->req_tx_entries_per_subcrq !=
                           old_num_tx_slots) {
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
                        release_napi(adapter);
                        release_vpd_data(adapter);

                        rc = init_resources(adapter);
                        if (rc)
                                return rc;

                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
                                return rc;

                        rc = reset_rx_pools(adapter);
                        if (rc)
                                return rc;
                }
                ibmvnic_disable_irqs(adapter);
        }
        adapter->state = VNIC_CLOSED;

        if (reset_state == VNIC_CLOSED)
                return 0;

        rc = __ibmvnic_open(netdev);
        if (rc) {
                if (list_empty(&adapter->rwi_list))
                        adapter->state = VNIC_CLOSED;
                else
                        adapter->state = reset_state;

                return 0;
        }

        /* kick napi */
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);

        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
                call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);

        return 0;
}
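
/* CRQ recovery strategy above, by reset type: parameter-change resets
 * rebuild the CRQ from scratch via init_crq_queue(), partition mobility
 * only re-enables the existing CRQ, and the remaining non-fatal resets
 * reset the CRQ in place and re-enable VIO interrupts.
 */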

static int do_hard_reset(struct ibmvnic_adapter *adapter,
                         struct ibmvnic_rwi *rwi, u32 reset_state)
{
        struct net_device *netdev = adapter->netdev;
        int rc;

        netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
                   rwi->reset_reason);

        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;

        ibmvnic_cleanup(netdev);
        release_resources(adapter);
        release_sub_crqs(adapter, 0);
        release_crq_queue(adapter);

        /* remove the closed state so when we call open it appears
         * we are coming from the probed state.
         */
        adapter->state = VNIC_PROBED;

        reinit_completion(&adapter->init_done);
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
                           "Couldn't initialize crq. rc=%d\n", rc);
                return rc;
        }

        rc = ibmvnic_init(adapter);
        if (rc)
                return rc;

        /* If the adapter was in PROBE state prior to the reset,
         * exit here.
         */
        if (reset_state == VNIC_PROBED)
                return 0;

        rc = ibmvnic_login(netdev);
        if (rc) {
                adapter->state = VNIC_PROBED;
                return 0;
        }

        rc = init_resources(adapter);
        if (rc)
                return rc;

        ibmvnic_disable_irqs(adapter);
        adapter->state = VNIC_CLOSED;

        if (reset_state == VNIC_CLOSED)
                return 0;

        rc = __ibmvnic_open(netdev);
        if (rc) {
                if (list_empty(&adapter->rwi_list))
                        adapter->state = VNIC_CLOSED;
                else
                        adapter->state = reset_state;

                return 0;
        }

        return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rwi *rwi;
        unsigned long flags;

        spin_lock_irqsave(&adapter->rwi_lock, flags);

        if (!list_empty(&adapter->rwi_list)) {
                rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
                                       list);
                list_del(&rwi->list);
        } else {
                rwi = NULL;
        }

        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rwi *rwi;

        rwi = get_next_rwi(adapter);
        while (rwi) {
                kfree(rwi);
                rwi = get_next_rwi(adapter);
        }
}

static void __ibmvnic_reset(struct work_struct *work)
{
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
        bool we_lock_rtnl = false;
        u32 reset_state;
        int rc = 0;

        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);

        /* netif_set_real_num_xx_queues needs to take rtnl lock here
         * unless wait_for_reset is set, in which case the rtnl lock
         * has already been taken before initializing the reset
         */
        if (!adapter->wait_for_reset) {
                rtnl_lock();
                we_lock_rtnl = true;
        }
        reset_state = adapter->state;

        rwi = get_next_rwi(adapter);
        while (rwi) {
                if (adapter->force_reset_recovery) {
                        adapter->force_reset_recovery = false;
                        rc = do_hard_reset(adapter, rwi, reset_state);
                } else {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
                if (rc && rc != IBMVNIC_INIT_FAILED &&
                    !adapter->force_reset_recovery)
                        break;

                rwi = get_next_rwi(adapter);
        }

        if (adapter->wait_for_reset) {
                adapter->wait_for_reset = false;
                adapter->reset_done_rc = rc;
                complete(&adapter->reset_done);
        }

        if (rc) {
                netdev_dbg(adapter->netdev, "Reset failed\n");
                free_all_rwi(adapter);
        }

        adapter->resetting = false;
        if (we_lock_rtnl)
                rtnl_unlock();
}

static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
                         enum ibmvnic_reset_reason reason)
{
        struct list_head *entry, *tmp_entry;
        struct ibmvnic_rwi *rwi, *tmp;
        struct net_device *netdev = adapter->netdev;
        unsigned long flags;
        int ret;

        if (adapter->state == VNIC_REMOVING ||
            adapter->state == VNIC_REMOVED ||
            adapter->failover_pending) {
                ret = EBUSY;
                netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
                goto err;
        }

        if (adapter->state == VNIC_PROBING) {
                netdev_warn(netdev, "Adapter reset during probe\n");
                ret = adapter->init_done_rc = EAGAIN;
                goto err;
        }

        spin_lock_irqsave(&adapter->rwi_lock, flags);

        list_for_each(entry, &adapter->rwi_list) {
                tmp = list_entry(entry, struct ibmvnic_rwi, list);
                if (tmp->reset_reason == reason) {
                        netdev_dbg(netdev, "Skipping matching reset\n");
                        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                        ret = EBUSY;
                        goto err;
                }
        }

        rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
        if (!rwi) {
                spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                ibmvnic_close(netdev);
                ret = ENOMEM;
                goto err;
        }
        /* if we just received a transport event,
         * flush reset queue and process this reset
         */
        if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
                list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
                        list_del(entry);
        }
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        adapter->resetting = true;
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
        schedule_work(&adapter->ibmvnic_reset);

        return 0;
err:
        if (adapter->wait_for_reset)
                adapter->wait_for_reset = false;
        return -ret;
}
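
/* Scheduling notes (summary of the logic above): a reset request whose
 * reason already sits in the work queue is dropped as a duplicate, and a
 * transport event (force_reset_recovery) flushes every queued work item
 * so the subsequent hard reset is processed on its own.
 */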

static void ibmvnic_tx_timeout(struct net_device *dev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);

        ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_rx_buff *rx_buff)
{
        struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

        rx_buff->skb = NULL;

        pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
        pool->next_alloc = (pool->next_alloc + 1) % pool->size;

        atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
        struct net_device *netdev = napi->dev;
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int scrq_num = (int)(napi - adapter->napi);
        int frames_processed = 0;

restart_poll:
        while (frames_processed < budget) {
                struct sk_buff *skb;
                struct ibmvnic_rx_buff *rx_buff;
                union sub_crq *next;
                u32 length;
                u16 offset;
                u8 flags = 0;

                if (unlikely(adapter->resetting &&
                             adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
                        enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        napi_complete_done(napi, frames_processed);
                        return frames_processed;
                }

                if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
                        break;
                next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
                rx_buff =
                    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
                                                          rx_comp.correlator);
                /* do error checking */
                if (next->rx_comp.rc) {
                        netdev_dbg(netdev, "rx buffer returned with rc %x\n",
                                   be16_to_cpu(next->rx_comp.rc));
                        /* free the entry */
                        next->rx_comp.first = 0;
                        dev_kfree_skb_any(rx_buff->skb);
                        remove_buff_from_pool(adapter, rx_buff);
                        continue;
                } else if (!rx_buff->skb) {
                        /* free the entry */
                        next->rx_comp.first = 0;
                        remove_buff_from_pool(adapter, rx_buff);
                        continue;
                }

                length = be32_to_cpu(next->rx_comp.len);
                offset = be16_to_cpu(next->rx_comp.off_frame_data);
                flags = next->rx_comp.flags;
                skb = rx_buff->skb;
                skb_copy_to_linear_data(skb, rx_buff->data + offset,
                                        length);

                /* VLAN Header has been stripped by the system firmware and
                 * needs to be inserted by the driver
                 */
                if (adapter->rx_vlan_header_insertion &&
                    (flags & IBMVNIC_VLAN_STRIPPED))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               ntohs(next->rx_comp.vlan_tci));

                /* free the entry */
                next->rx_comp.first = 0;
                remove_buff_from_pool(adapter, rx_buff);

                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, scrq_num);

                if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
                    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                length = skb->len;
                napi_gro_receive(napi, skb); /* send it up */
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += length;
                adapter->rx_stats_buffers[scrq_num].packets++;
                adapter->rx_stats_buffers[scrq_num].bytes += length;
                frames_processed++;
        }

        if (adapter->state != VNIC_CLOSING)
                replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

        if (frames_processed < budget) {
                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                napi_complete_done(napi, frames_processed);
                if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
                    napi_reschedule(napi)) {
                        disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        goto restart_poll;
                }
        }
        return frames_processed;
}
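
/* The pending_scrq() re-check after napi_complete_done() above closes
 * the race where a completion arrives between the final queue check and
 * interrupt re-enable; if one snuck in, polling restarts with the
 * interrupt masked again.
 */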

static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
        int rc, ret;

        adapter->fallback.mtu = adapter->req_mtu;
        adapter->fallback.rx_queues = adapter->req_rx_queues;
        adapter->fallback.tx_queues = adapter->req_tx_queues;
        adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
        adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

        init_completion(&adapter->reset_done);
        adapter->wait_for_reset = true;
        rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
        if (rc)
                return rc;
        wait_for_completion(&adapter->reset_done);

        ret = 0;
        if (adapter->reset_done_rc) {
                ret = -EIO;
                adapter->desired.mtu = adapter->fallback.mtu;
                adapter->desired.rx_queues = adapter->fallback.rx_queues;
                adapter->desired.tx_queues = adapter->fallback.tx_queues;
                adapter->desired.rx_entries = adapter->fallback.rx_entries;
                adapter->desired.tx_entries = adapter->fallback.tx_entries;

                init_completion(&adapter->reset_done);
                adapter->wait_for_reset = true;
                rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
                if (rc)
                        return ret;
                wait_for_completion(&adapter->reset_done);
        }
        adapter->wait_for_reset = false;

        return ret;
}
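
/* If a parameter-change reset fails, the desired values are restored
 * from the fallback snapshot taken above and a second reset is issued so
 * the adapter comes back with its previous working configuration; -EIO
 * is still returned to the caller.
 */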

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        adapter->desired.mtu = new_mtu + ETH_HLEN;

        return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
{
        /* Some backing hardware adapters cannot handle packets with a
         * MSS less than 224 or with only one segment.
         */
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_size < 224 ||
                    skb_shinfo(skb)->gso_segs == 1)
                        features &= ~NETIF_F_GSO_MASK;
        }

        return features;
}
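
/* Example: a TSO skb with gso_size 200 has every GSO feature masked off
 * here, so the core segments it in software and the driver only ever
 * sees fully-formed frames from skbs below the 224-byte MSS floor.
 */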

static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_open = ibmvnic_open,
        .ndo_stop = ibmvnic_close,
        .ndo_start_xmit = ibmvnic_xmit,
        .ndo_set_rx_mode = ibmvnic_set_multi,
        .ndo_set_mac_address = ibmvnic_set_mac,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_tx_timeout = ibmvnic_tx_timeout,
        .ndo_change_mtu = ibmvnic_change_mtu,
        .ndo_features_check = ibmvnic_features_check,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
                                      struct ethtool_link_ksettings *cmd)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        rc = send_query_phys_parms(adapter);
        if (rc) {
                adapter->speed = SPEED_UNKNOWN;
                adapter->duplex = DUPLEX_UNKNOWN;
        }
        cmd->base.speed = adapter->speed;
        cmd->base.duplex = adapter->duplex;
        cmd->base.port = PORT_FIBRE;
        cmd->base.phy_address = 0;
        cmd->base.autoneg = AUTONEG_ENABLE;

        return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *info)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
        strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, adapter->fw_version,
                sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        /* Don't need to send a query because we request a logical link up at
         * init and then we wait for link state indications
         */
        return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
                ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
                ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
        } else {
                ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
                ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
        }
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
        ring->tx_pending = adapter->req_tx_entries_per_subcrq;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int ret;

        ret = 0;
        adapter->desired.rx_entries = ring->rx_pending;
        adapter->desired.tx_entries = ring->tx_pending;

        ret = wait_for_reset(adapter);

        if (!ret &&
            (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
             adapter->req_tx_entries_per_subcrq != ring->tx_pending))
                netdev_info(netdev,
                            "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
                            ring->rx_pending, ring->tx_pending,
                            adapter->req_rx_add_entries_per_subcrq,
                            adapter->req_tx_entries_per_subcrq);
        return ret;
}

static void ibmvnic_get_channels(struct net_device *netdev,
                                 struct ethtool_channels *channels)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
                channels->max_rx = adapter->max_rx_queues;
                channels->max_tx = adapter->max_tx_queues;
        } else {
                channels->max_rx = IBMVNIC_MAX_QUEUES;
                channels->max_tx = IBMVNIC_MAX_QUEUES;
        }

        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->req_rx_queues;
        channels->tx_count = adapter->req_tx_queues;
        channels->other_count = 0;
        channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channels)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int ret;

        ret = 0;
        adapter->desired.rx_queues = channels->rx_count;
        adapter->desired.tx_queues = channels->tx_count;

        ret = wait_for_reset(adapter);

        if (!ret &&
            (adapter->req_rx_queues != channels->rx_count ||
             adapter->req_tx_queues != channels->tx_count))
                netdev_info(netdev,
                            "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
                            channels->rx_count, channels->tx_count,
                            adapter->req_rx_queues, adapter->req_tx_queues);
        return ret;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
                                i++, data += ETH_GSTRING_LEN)
                        memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

                for (i = 0; i < adapter->req_tx_queues; i++) {
                        snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
                        data += ETH_GSTRING_LEN;

                        snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
                        data += ETH_GSTRING_LEN;

                        snprintf(data, ETH_GSTRING_LEN,
                                 "tx%d_dropped_packets", i);
                        data += ETH_GSTRING_LEN;
                }

                for (i = 0; i < adapter->req_rx_queues; i++) {
                        snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
                        data += ETH_GSTRING_LEN;

                        snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
                        data += ETH_GSTRING_LEN;

                        snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
                        data += ETH_GSTRING_LEN;
                }
                break;

        case ETH_SS_PRIV_FLAGS:
                for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
                        strcpy(data + i * ETH_GSTRING_LEN,
                               ibmvnic_priv_flags[i]);
                break;
        default:
                return;
        }
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ibmvnic_stats) +
                       adapter->req_tx_queues * NUM_TX_STATS +
                       adapter->req_rx_queues * NUM_RX_STATS;
        case ETH_SS_PRIV_FLAGS:
                return ARRAY_SIZE(ibmvnic_priv_flags);
        default:
                return -EOPNOTSUPP;
        }
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        union ibmvnic_crq crq;
        int i, j;
        int rc;

        memset(&crq, 0, sizeof(crq));
        crq.request_statistics.first = IBMVNIC_CRQ_CMD;
        crq.request_statistics.cmd = REQUEST_STATISTICS;
        crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
        crq.request_statistics.len =
            cpu_to_be32(sizeof(struct ibmvnic_statistics));

        /* Wait for data to be written */
        init_completion(&adapter->stats_done);
        rc = ibmvnic_send_crq(adapter, &crq);
        if (rc)
                return;
        wait_for_completion(&adapter->stats_done);

        for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
                data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
                                                       ibmvnic_stats[i].offset));

        for (j = 0; j < adapter->req_tx_queues; j++) {
                data[i] = adapter->tx_stats_buffers[j].packets;
                i++;
                data[i] = adapter->tx_stats_buffers[j].bytes;
                i++;
                data[i] = adapter->tx_stats_buffers[j].dropped_packets;
                i++;
        }

        for (j = 0; j < adapter->req_rx_queues; j++) {
                data[i] = adapter->rx_stats_buffers[j].packets;
                i++;
                data[i] = adapter->rx_stats_buffers[j].bytes;
                i++;
                data[i] = adapter->rx_stats_buffers[j].interrupts;
                i++;
        }
}

static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

        if (which_maxes)
                adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
        else
                adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

        return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_drvinfo = ibmvnic_get_drvinfo,
        .get_msglevel = ibmvnic_get_msglevel,
        .set_msglevel = ibmvnic_set_msglevel,
        .get_link = ibmvnic_get_link,
        .get_ringparam = ibmvnic_get_ringparam,
        .set_ringparam = ibmvnic_set_ringparam,
        .get_channels = ibmvnic_get_channels,
        .set_channels = ibmvnic_set_channels,
        .get_strings = ibmvnic_get_strings,
        .get_sset_count = ibmvnic_get_sset_count,
        .get_ethtool_stats = ibmvnic_get_ethtool_stats,
        .get_link_ksettings = ibmvnic_get_link_ksettings,
        .get_priv_flags = ibmvnic_get_priv_flags,
        .set_priv_flags = ibmvnic_set_priv_flags,
};

/* Routines for managing CRQs/sCRQs */

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
                                   struct ibmvnic_sub_crq_queue *scrq)
{
        int rc;

        if (scrq->irq) {
                free_irq(scrq->irq, scrq);
                irq_dispose_mapping(scrq->irq);
                scrq->irq = 0;
        }

        memset(scrq->msgs, 0, 4 * PAGE_SIZE);
        atomic_set(&scrq->used, 0);
        scrq->cur = 0;

        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
        return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
        int i, rc;

        for (i = 0; i < adapter->req_tx_queues; i++) {
                netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
                rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
                if (rc)
                        return rc;
        }

        for (i = 0; i < adapter->req_rx_queues; i++) {
                netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
                rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
                if (rc)
                        return rc;
        }

        /* Any non-zero rc already returned above; return 0 explicitly so
         * rc is never read uninitialized when both queue counts are zero.
         */
        return 0;
}

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_sub_crq_queue *scrq,
                                  bool do_h_free)
{
        struct device *dev = &adapter->vdev->dev;
        long rc;

        netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

        if (do_h_free) {
                /* Close the sub-crqs */
                do {
                        rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
                                                adapter->vdev->unit_address,
                                                scrq->crq_num);
                } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

                if (rc) {
                        netdev_err(adapter->netdev,
                                   "Failed to release sub-CRQ %16lx, rc = %ld\n",
                                   scrq->crq_num, rc);
                }
        }

        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_pages((unsigned long)scrq->msgs, 2);
        kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
                                                        *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_sub_crq_queue *scrq;
        int rc;

        scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
        if (!scrq)
                return NULL;

        scrq->msgs =
                (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
        if (!scrq->msgs) {
                dev_warn(dev, "Couldn't allocate crq queue messages page\n");
                goto zero_page_failed;
        }

        scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, scrq->msg_token)) {
                dev_warn(dev, "Couldn't map crq queue messages page\n");
                goto map_failed;
        }

        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

        if (rc == H_RESOURCE)
                rc = ibmvnic_reset_crq(adapter);

        if (rc == H_CLOSED) {
                dev_warn(dev, "Partner adapter not ready, waiting.\n");
        } else if (rc) {
                dev_warn(dev, "Error %d registering sub-crq\n", rc);
                goto reg_failed;
        }

        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        spin_lock_init(&scrq->lock);

        netdev_dbg(adapter->netdev,
                   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
                   scrq->crq_num, scrq->hw_irq, scrq->irq);

        return scrq;

reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
map_failed:
        free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
        kfree(scrq);

        return NULL;
}
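
/* Note on the sizes above: __get_free_pages(..., 2) hands back an
 * order-2 allocation (2^2 = 4 pages), which is why the DMA mapping,
 * the reset-time memset, and the queue-size arithmetic all use
 * 4 * PAGE_SIZE.
 */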

static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
                        if (!adapter->tx_scrq[i])
                                continue;

                        netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
                                   i);
                        if (adapter->tx_scrq[i]->irq) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
                                irq_dispose_mapping(adapter->tx_scrq[i]->irq);
                                adapter->tx_scrq[i]->irq = 0;
                        }

                        release_sub_crq_queue(adapter, adapter->tx_scrq[i],
                                              do_h_free);
                }

                kfree(adapter->tx_scrq);
                adapter->tx_scrq = NULL;
                adapter->num_active_tx_scrqs = 0;
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
                        if (!adapter->rx_scrq[i])
                                continue;

                        netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
                                   i);
                        if (adapter->rx_scrq[i]->irq) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
                                irq_dispose_mapping(adapter->rx_scrq[i]->irq);
                                adapter->rx_scrq[i]->irq = 0;
                        }

                        release_sub_crq_queue(adapter, adapter->rx_scrq[i],
                                              do_h_free);
                }

                kfree(adapter->rx_scrq);
                adapter->rx_scrq = NULL;
                adapter->num_active_rx_scrqs = 0;
        }
}
2734
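/* Sub-CRQ interrupt sources are masked and unmasked through the
 * hypervisor rather than an irq_chip: H_VIOCTL with
 * H_DISABLE_VIO_INTERRUPT or H_ENABLE_VIO_INTERRUPT acts on the hardware
 * irq number handed back when the sub-CRQ was registered.
 */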
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

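/* Re-arm a sub-CRQ interrupt source. After a partition migration
 * (VNIC_RESET_MOBILITY) an EOI owed to the source system may never have
 * been issued, so one is replayed via H_EOI first; the 0xff000000 OR'd
 * into the irq number forms the XIRR value the hcall expects.
 */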
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (adapter->resetting &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = (0xff000000) | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		if (rc)
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 *first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries = 0;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			if (index & IBMVNIC_TSO_POOL_MASK) {
				tx_pool = &adapter->tso_pool[pool];
				index &= ~IBMVNIC_TSO_POOL_MASK;
			} else {
				tx_pool = &adapter->tx_pool[pool];
			}

			txbuff = &tx_pool->tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = &txbuff->indir_arr[0].generic.first;
			if (*first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
				*first = 0;
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			num_entries += txbuff->num_entries;

			tx_pool->free_map[tx_pool->producer_index] = index;
			tx_pool->producer_index =
				(tx_pool->producer_index + 1) %
					tx_pool->num_buffers;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

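/* Hard irq handler for a tx sub-CRQ. Tx completions are reaped directly
 * in interrupt context rather than deferred to NAPI; the source stays
 * masked while ibmvnic_complete_tx() drains the queue and is re-enabled
 * from within it.
 */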
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

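/* Wire up interrupts for every tx and rx sub-CRQ: the hardware irq
 * number from the hypervisor is mapped into the kernel's irq space with
 * irq_create_mapping() and the matching handler attached. On failure,
 * every irq registered so far is unwound and all sub-CRQs released.
 */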
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, scrq->name, scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter, true);
	return rc;
}

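/* Allocate all requested tx and rx sub-CRQs into one array and then
 * divide them between the tx and rx lists. If fewer queues could be
 * registered than requested (but at least the advertised minimums), the
 * shortfall is spread across the tx and rx requests before the lists
 * are sized.
 */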
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed-to-allocate queues between tx and rx */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		adapter->num_active_tx_scrqs++;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		adapter->num_active_rx_scrqs++;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i], true);
	kfree(allqueues);
	return -1;
}

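/* Send a REQUEST_CAPABILITY CRQ for every tunable the server must accept
 * before login. On the first pass (!retry) the values are seeded from
 * the user's desired settings and clamped so that a ring's worth of
 * buffers still fits in one long term mapped buffer
 * (IBMVNIC_MAX_LTB_SIZE); on a retry the values already adjusted by
 * handle_request_cap_rsp() are re-sent as is.
 */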
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;

		if (!adapter->desired.tx_entries)
			adapter->desired.tx_entries =
					adapter->max_tx_entries_per_subcrq;
		if (!adapter->desired.rx_entries)
			adapter->desired.rx_entries =
					adapter->max_rx_add_entries_per_subcrq;

		max_entries = IBMVNIC_MAX_LTB_SIZE /
			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.tx_entries = max_entries;
		}

		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
		    adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
			adapter->desired.rx_entries = max_entries;
		}

		if (adapter->desired.tx_entries)
			adapter->req_tx_entries_per_subcrq =
					adapter->desired.tx_entries;
		else
			adapter->req_tx_entries_per_subcrq =
					adapter->max_tx_entries_per_subcrq;

		if (adapter->desired.rx_entries)
			adapter->req_rx_add_entries_per_subcrq =
					adapter->desired.rx_entries;
		else
			adapter->req_rx_add_entries_per_subcrq =
					adapter->max_rx_add_entries_per_subcrq;

		if (adapter->desired.tx_queues)
			adapter->req_tx_queues =
					adapter->desired.tx_queues;
		else
			adapter->req_tx_queues =
					adapter->opt_tx_comp_sub_queues;

		if (adapter->desired.rx_queues)
			adapter->req_rx_queues =
					adapter->desired.rx_queues;
		else
			adapter->req_rx_queues =
					adapter->opt_rx_comp_queues;

		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

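/* An entry in a sub-CRQ belongs to the driver once the sender has set
 * the IBMVNIC_CRQ_CMD_RSP bit in its first byte. pending_scrq() only
 * peeks at the current slot; ibmvnic_next_scrq() consumes it and
 * advances the cursor under the queue lock.
 */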
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}

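/* Post one 32-byte descriptor to the sub-CRQ identified by
 * remote_handle. The mb() orders the descriptor stores ahead of the
 * hcall so the hypervisor never sees a partially written entry.
 */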
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

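/* Post a single 16-byte command on the main CRQ. Apart from the INIT
 * handshake itself, sends are refused with -EINVAL while the CRQ is
 * marked inactive, since such a request would belong to a stale session
 * across a reset.
 */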
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (adapter->resetting)
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

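/* Client identification data is appended to the login buffer as a
 * packed list of type/len/name records: type 1 is the client OS
 * ("Linux"), type 2 the LPAR name (utsname nodename), type 3 the netdev
 * name; space for a zeroed fourth record terminating the list is
 * reserved by vnic_client_data_len() below.
 */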
struct vnic_login_client_data {
	u8 type;
	__be16 len;
	char name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer. There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}

static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}

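/* Build and send the LOGIN request. The DMA-mapped login buffer carries
 * the sub-CRQ handles for every tx and rx queue plus the client data
 * records above; the response buffer mapped here is filled in by the
 * server and validated later in handle_login_rsp().
 */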
static int send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int client_data_len;
	struct vnic_login_client_data *vlcd;
	int i;

	if (!adapter->tx_scrq || !adapter->rx_scrq) {
		netdev_err(adapter->netdev,
			   "RX or TX queues are not allocated, device login failed\n");
		return -1;
	}

	release_login_rsp_buffer(adapter);
	client_data_len = vnic_client_data_len(adapter);

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
	    client_data_len;

	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	/* Insert vNIC login client data */
	vlcd = (struct vnic_login_client_data *)
		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
	login_buffer->client_data_offset =
		cpu_to_be32((char *)vlcd - (char *)login_buffer);
	login_buffer->client_data_len = cpu_to_be32(client_data_len);

	vnic_add_client_data(adapter, vlcd);

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return 0;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return -1;
}

static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	return ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

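/* Capability queries are asynchronous: each QUERY_CAPABILITY sent below
 * bumps running_cap_crqs, and the response handlers use that counter to
 * tell when the whole exchange has completed.
 */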
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

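/* Parse the firmware level out of the VPD image: the ASCII "RM" keyword
 * is followed by a one-byte length and then the firmware version string,
 * which is copied into adapter->fw_version with bounds checks against
 * the end of the buffer ("N/A" if anything is missing).
 */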
static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}

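/* Consume the server's QUERY_IP_OFFLOAD response: log the advertised
 * offloads, translate them into netdev hw_features (checksum offloads
 * and TSO; large receive is left off for now), and push the selection
 * back to the server with a CONTROL_IP_OFFLOAD request. Outside of
 * initial probing, features the server no longer offers are cleared and
 * previously wanted ones re-enabled.
 */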
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
			adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}

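/* Each REQUEST_CAPABILITY response either confirms a value or, on
 * PARTIALSUCCESS, carries the server's counter-offer. The stored request
 * is overwritten with that offer (except MTU, which reverts to the
 * fallback value) and the whole set is re-sent. Once no responses remain
 * outstanding, the IP offload query is issued.
 */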
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

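/* The REQUEST_UNMAP response carries only a return code; report it if
 * the unmap failed.
 */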
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

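/* Log the mapping statistics (page size, total and free pages) that the
 * server reports in response to a QUERY_MAP request.
 */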
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

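/* Record one queried capability reported by the server. Each response
 * decrements running_cap_crqs; when the last one arrives, the driver
 * turns around and requests its desired capabilities via
 * ibmvnic_send_req_caps().
 */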
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

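/* Ask the server for the physical port parameters (speed and duplex)
 * and sleep on fw_done until handle_query_phys_parms_rsp() completes it.
 */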
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);
	return adapter->fw_done_rc ? -EIO : 0;
}

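/* Translate the firmware's link speed and duplex encodings into the
 * ethtool SPEED_* and DUPLEX_* values cached in the adapter.
 */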
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	/* The speed field is big endian; convert it to CPU order before
	 * comparing against the IBMVNIC_*BPS constants.
	 */
	switch (be32_to_cpu(crq->query_phys_parms_rsp.speed)) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBP:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	default:
		netdev_warn(netdev, "Unknown speed 0x%08x\n",
			    be32_to_cpu(crq->query_phys_parms_rsp.speed));
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

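/* Central CRQ dispatcher: classify the message by its "first" byte
 * (initialization, transport event or command response) and then fan
 * command responses out to the per-command handlers above.
 */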
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)be64_to_cpu(u64_crq[0]),
		   (unsigned long int)be64_to_cpu(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			adapter->failover_pending = false;
			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		if (adapter->resetting)
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

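/* CRQ interrupt handler: defer all processing to the tasklet. */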
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

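/* Drain the CRQ under queue->lock, handing each valid message to
 * ibmvnic_handle_crq() and clearing its valid bit afterwards.
 */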
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

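/* Re-enable the CRQ with H_ENABLE_CRQ, retrying while the hypervisor
 * reports the operation as busy or in progress.
 */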
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

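/* Reset the CRQ: free the registration, scrub the queue page and
 * register it with the hypervisor again.
 */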
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

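/* Tear down the CRQ: release the irq and tasklet, free the queue
 * registration with the hypervisor, then unmap and free the queue page.
 */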
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

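/* Allocate and register the one-page CRQ, then hook up the interrupt
 * and tasklet used to service it.
 */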
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

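/* Initialization path used while resetting. Unlike ibmvnic_init(), it
 * reinitializes the existing init_done completion and only reallocates
 * the sub-CRQs when the negotiated queue counts have changed.
 */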
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

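/* First-time initialization: kick off the CRQ handshake, wait up to 30
 * seconds for it to complete, then set up the sub-CRQs and their irqs.
 */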
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static struct device_attribute dev_attr_failover;

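/* Probe: allocate the netdev, bring up the CRQ, negotiate capabilities
 * via ibmvnic_init() (retrying while the server returns EAGAIN), then
 * create the failover sysfs attribute and register the netdev.
 */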
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	init_completion(&adapter->init_done);
	adapter->resetting = false;

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}

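/* Unwind everything set up in ibmvnic_probe() and during negotiation. */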
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

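/* sysfs hook: writing "1" to the failover attribute fetches the session
 * token with H_GET_SESSION_TOKEN and signals H_SESSION_ERR_DETECTED so
 * the hypervisor fails over to the backup adapter.
 */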
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

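/* Estimate the IO entitlement (DMA-mappable memory) the device wants:
 * the CRQ page, the statistics buffer, the sub-CRQ queues and the
 * long-term-mapped rx buffer pools.
 */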
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

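/* PM resume hook: poke the tasklet in case CRQ messages arrived while
 * the partition was suspended.
 */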
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);