/**************************************************************************/
2/* */
3/* IBM System i and System p Virtual NIC Device Driver */
4/* Copyright (C) 2014 IBM Corp. */
5/* Santiago Leon (santi_leon@yahoo.com) */
6/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7/* John Allen (jallen@linux.vnet.ibm.com) */
8/* */
9/* This program is free software; you can redistribute it and/or modify */
10/* it under the terms of the GNU General Public License as published by */
11/* the Free Software Foundation; either version 2 of the License, or */
12/* (at your option) any later version. */
13/* */
14/* This program is distributed in the hope that it will be useful, */
15/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17/* GNU General Public License for more details. */
18/* */
19/* You should have received a copy of the GNU General Public License */
20/* along with this program. */
21/* */
22/* This module contains the implementation of a virtual ethernet device */
23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24/* option of the RS/6000 Platform Architecture to interface with virtual */
25/* ethernet NICs that are presented to the partition by the hypervisor. */
26/* */
27/* Messages are passed between the VNIC driver and the VNIC server using */
28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29/* issue and receive commands that initiate communication with the server */
30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31/* are used by the driver to notify the server that a packet is */
32/* ready for transmission or that a buffer has been added to receive a */
33/* packet. Subsequently, sCRQs are used by the server to notify the */
34/* driver that a packet transmission has been completed or that a packet */
35/* has been received and placed in a waiting buffer. */
36/* */
37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38/* which skbs are DMA mapped and immediately unmapped when the transmit */
39/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
41/* buffers are allocated on driver initialization and these buffers are */
42/* then continuously reused to pass skbs to and from the VNIC server. */
43/* */
44/**************************************************************************/
45
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/types.h>
49#include <linux/errno.h>
50#include <linux/completion.h>
51#include <linux/ioport.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/init.h>
58#include <linux/delay.h>
59#include <linux/mm.h>
60#include <linux/ethtool.h>
61#include <linux/proc_fs.h>
62#include <linux/in.h>
63#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
66#include <linux/kthread.h>
67#include <linux/seq_file.h>
68#include <linux/debugfs.h>
69#include <linux/interrupt.h>
70#include <net/net_namespace.h>
71#include <asm/hvcall.h>
72#include <linux/atomic.h>
73#include <asm/vio.h>
74#include <asm/iommu.h>
75#include <linux/uaccess.h>
76#include <asm/firmware.h>
77#include <linux/seq_file.h>
78
79#include "ibmvnic.h"
80
81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88
89static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90static int ibmvnic_remove(struct vio_dev *);
91static void release_sub_crqs(struct ibmvnic_adapter *);
92static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
93static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
94static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
95static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
96static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
97 union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
100static int enable_scrq_irq(struct ibmvnic_adapter *,
101 struct ibmvnic_sub_crq_queue *);
102static int disable_scrq_irq(struct ibmvnic_adapter *,
103 struct ibmvnic_sub_crq_queue *);
104static int pending_scrq(struct ibmvnic_adapter *,
105 struct ibmvnic_sub_crq_queue *);
106static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
107 struct ibmvnic_sub_crq_queue *);
108static int ibmvnic_poll(struct napi_struct *napi, int data);
109static void send_map_query(struct ibmvnic_adapter *adapter);
110static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111static void send_request_unmap(struct ibmvnic_adapter *, u8);
112
113struct ibmvnic_stat {
114 char name[ETH_GSTRING_LEN];
115 int offset;
116};
117
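/* IBMVNIC_STAT_OFF computes the byte offset of a firmware statistic within
 * the adapter's stats block, and IBMVNIC_GET_STAT reads the u64 value found
 * at that offset; the ibmvnic_stats table below maps each ethtool string
 * name to one of these offsets.
 */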
118#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
119 offsetof(struct ibmvnic_statistics, stat))
120#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
121
122static const struct ibmvnic_stat ibmvnic_stats[] = {
123 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
124 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
125 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
126 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
127 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
128 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
129 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
130 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
131 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
132 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
133 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
134 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
135 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
136 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
137 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
138 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
139 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
140 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
141 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
142 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
143 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
144 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
145};
146
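/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call: registers a
 * sub-CRQ message area with the hypervisor and returns the assigned queue
 * number and hardware interrupt through *number and *irq.
 */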
147static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
148 unsigned long length, unsigned long *number,
149 unsigned long *irq)
150{
151 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
152 long rc;
153
154 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
155 *number = retbuf[0];
156 *irq = retbuf[1];
157
158 return rc;
159}
160
161/* net_device_ops functions */
162
163static void init_rx_pool(struct ibmvnic_adapter *adapter,
164 struct ibmvnic_rx_pool *rx_pool, int num, int index,
165 int buff_size, int active)
166{
167 netdev_dbg(adapter->netdev,
168 "Initializing rx_pool %d, %d buffs, %d bytes each\n",
169 index, num, buff_size);
170 rx_pool->size = num;
171 rx_pool->index = index;
172 rx_pool->buff_size = buff_size;
173 rx_pool->active = active;
174}
175
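/* Allocate a DMA-coherent long term buffer, hand its DMA address to the
 * VNIC server with send_request_map(), and wait on the fw_done completion
 * for the firmware response before returning.
 */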
176static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
177 struct ibmvnic_long_term_buff *ltb, int size)
178{
179 struct device *dev = &adapter->vdev->dev;
180
181 ltb->size = size;
182 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
183 GFP_KERNEL);
184
185 if (!ltb->buff) {
186 dev_err(dev, "Couldn't alloc long term buffer\n");
187 return -ENOMEM;
188 }
189 ltb->map_id = adapter->map_id;
190 adapter->map_id++;
191 send_request_map(adapter, ltb->addr,
192 ltb->size, ltb->map_id);
193 init_completion(&adapter->fw_done);
194 wait_for_completion(&adapter->fw_done);
195 return 0;
196}
197
198static void free_long_term_buff(struct ibmvnic_adapter *adapter,
199 struct ibmvnic_long_term_buff *ltb)
200{
201 struct device *dev = &adapter->vdev->dev;
202
203 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
204 send_request_unmap(adapter, ltb->map_id);
205}
206
207static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
208 struct ibmvnic_rx_pool *pool)
209{
210 struct device *dev = &adapter->vdev->dev;
211 int i;
212
213 pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
214 if (!pool->free_map)
215 return -ENOMEM;
216
217 pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
218 GFP_KERNEL);
219
220 if (!pool->rx_buff) {
221 dev_err(dev, "Couldn't alloc rx buffers\n");
222 kfree(pool->free_map);
223 return -ENOMEM;
224 }
225
226 if (alloc_long_term_buff(adapter, &pool->long_term_buff,
227 pool->size * pool->buff_size)) {
228 kfree(pool->free_map);
229 kfree(pool->rx_buff);
230 return -ENOMEM;
231 }
232
233 for (i = 0; i < pool->size; ++i)
234 pool->free_map[i] = i;
235
236 atomic_set(&pool->available, 0);
237 pool->next_alloc = 0;
238 pool->next_free = 0;
239
240 return 0;
241}
242
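/* Post fresh receive buffers for this rx pool: each new skb is assigned a
 * slot in the pool's long term mapped buffer and advertised to the VNIC
 * server with an rx_add sub-CRQ entry.
 */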
243static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
244 struct ibmvnic_rx_pool *pool)
245{
246 int count = pool->size - atomic_read(&pool->available);
247 struct device *dev = &adapter->vdev->dev;
248 int buffers_added = 0;
249 unsigned long lpar_rc;
250 union sub_crq sub_crq;
251 struct sk_buff *skb;
252 unsigned int offset;
253 dma_addr_t dma_addr;
254 unsigned char *dst;
255 u64 *handle_array;
256 int shift = 0;
257 int index;
258 int i;
259
260 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
261 be32_to_cpu(adapter->login_rsp_buf->
262 off_rxadd_subcrqs));
263
264 for (i = 0; i < count; ++i) {
265 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
266 if (!skb) {
267 dev_err(dev, "Couldn't replenish rx buff\n");
268 adapter->replenish_no_mem++;
269 break;
270 }
271
272 index = pool->free_map[pool->next_free];
273
274 if (pool->rx_buff[index].skb)
275 dev_err(dev, "Inconsistent free_map!\n");
276
277 /* Copy the skb to the long term mapped DMA buffer */
278 offset = index * pool->buff_size;
279 dst = pool->long_term_buff.buff + offset;
280 memset(dst, 0, pool->buff_size);
281 dma_addr = pool->long_term_buff.addr + offset;
282 pool->rx_buff[index].data = dst;
283
284 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
285 pool->rx_buff[index].dma = dma_addr;
286 pool->rx_buff[index].skb = skb;
287 pool->rx_buff[index].pool_index = pool->index;
288 pool->rx_buff[index].size = pool->buff_size;
289
290 memset(&sub_crq, 0, sizeof(sub_crq));
291 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
292 sub_crq.rx_add.correlator =
293 cpu_to_be64((u64)&pool->rx_buff[index]);
294 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
295 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
296
297 /* The length field of the sCRQ is defined to be 24 bits so the
298 * buffer size needs to be left shifted by a byte before it is
299 * converted to big endian to prevent the last byte from being
300 * truncated.
301 */
302#ifdef __LITTLE_ENDIAN__
303 shift = 8;
304#endif
305 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
306
307 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
308 &sub_crq);
309 if (lpar_rc != H_SUCCESS)
310 goto failure;
311
312 buffers_added++;
313 adapter->replenish_add_buff_success++;
314 pool->next_free = (pool->next_free + 1) % pool->size;
315 }
316 atomic_add(buffers_added, &pool->available);
317 return;
318
319failure:
320 dev_info(dev, "replenish pools failure\n");
321 pool->free_map[pool->next_free] = index;
322 pool->rx_buff[index].skb = NULL;
323 if (!dma_mapping_error(dev, dma_addr))
324 dma_unmap_single(dev, dma_addr, pool->buff_size,
325 DMA_FROM_DEVICE);
326
327 dev_kfree_skb_any(skb);
328 adapter->replenish_add_buff_failure++;
329 atomic_add(buffers_added, &pool->available);
330}
331
332static void replenish_pools(struct ibmvnic_adapter *adapter)
333{
334 int i;
335
336 if (adapter->migrated)
337 return;
338
339 adapter->replenish_task_cycles++;
340 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
341 i++) {
342 if (adapter->rx_pool[i].active)
343 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
344 }
345}
346
347static void free_rx_pool(struct ibmvnic_adapter *adapter,
348 struct ibmvnic_rx_pool *pool)
349{
350 int i;
351
352 kfree(pool->free_map);
353 pool->free_map = NULL;
354
355 if (!pool->rx_buff)
356 return;
357
358 for (i = 0; i < pool->size; i++) {
359 if (pool->rx_buff[i].skb) {
360 dev_kfree_skb_any(pool->rx_buff[i].skb);
361 pool->rx_buff[i].skb = NULL;
362 }
363 }
364 kfree(pool->rx_buff);
365 pool->rx_buff = NULL;
366}
367
368static int ibmvnic_open(struct net_device *netdev)
369{
370 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
371 struct device *dev = &adapter->vdev->dev;
372 struct ibmvnic_tx_pool *tx_pool;
373 union ibmvnic_crq crq;
374 int rxadd_subcrqs;
375 u64 *size_array;
376 int tx_subcrqs;
377 int i, j;
378
379 rxadd_subcrqs =
380 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
381 tx_subcrqs =
382 be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
383 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
384 be32_to_cpu(adapter->login_rsp_buf->
385 off_rxadd_buff_size));
386 adapter->map_id = 1;
387 adapter->napi = kcalloc(adapter->req_rx_queues,
388 sizeof(struct napi_struct), GFP_KERNEL);
389 if (!adapter->napi)
390 goto alloc_napi_failed;
391 for (i = 0; i < adapter->req_rx_queues; i++) {
392 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
393 NAPI_POLL_WEIGHT);
394 napi_enable(&adapter->napi[i]);
395 }
396 adapter->rx_pool =
397 kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
398
399 if (!adapter->rx_pool)
400 goto rx_pool_arr_alloc_failed;
401 send_map_query(adapter);
402 for (i = 0; i < rxadd_subcrqs; i++) {
403 init_rx_pool(adapter, &adapter->rx_pool[i],
404 IBMVNIC_BUFFS_PER_POOL, i,
405 be64_to_cpu(size_array[i]), 1);
406 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
407 dev_err(dev, "Couldn't alloc rx pool\n");
408 goto rx_pool_alloc_failed;
409 }
410 }
411 adapter->tx_pool =
412 kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
413
414 if (!adapter->tx_pool)
415 goto tx_pool_arr_alloc_failed;
416 for (i = 0; i < tx_subcrqs; i++) {
417 tx_pool = &adapter->tx_pool[i];
418 tx_pool->tx_buff =
419 kcalloc(adapter->max_tx_entries_per_subcrq,
420 sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
421 if (!tx_pool->tx_buff)
422 goto tx_pool_alloc_failed;
423
424 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
425 adapter->max_tx_entries_per_subcrq *
426 adapter->req_mtu))
427 goto tx_ltb_alloc_failed;
428
429 tx_pool->free_map =
430 kcalloc(adapter->max_tx_entries_per_subcrq,
431 sizeof(int), GFP_KERNEL);
432 if (!tx_pool->free_map)
433 goto tx_fm_alloc_failed;
434
435 for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
436 tx_pool->free_map[j] = j;
437
438 tx_pool->consumer_index = 0;
439 tx_pool->producer_index = 0;
440 }
441 adapter->bounce_buffer_size =
442 (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
443 adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
444 GFP_KERNEL);
445 if (!adapter->bounce_buffer)
446 goto bounce_alloc_failed;
447
448 adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
449 adapter->bounce_buffer_size,
450 DMA_TO_DEVICE);
451 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
452 dev_err(dev, "Couldn't map tx bounce buffer\n");
453 goto bounce_map_failed;
454 }
455 replenish_pools(adapter);
456
457 /* We're ready to receive frames, enable the sub-crq interrupts and
458 * set the logical link state to up
459 */
460 for (i = 0; i < adapter->req_rx_queues; i++)
461 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
462
463 for (i = 0; i < adapter->req_tx_queues; i++)
464 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
465
466 memset(&crq, 0, sizeof(crq));
467 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
468 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
469 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
470 ibmvnic_send_crq(adapter, &crq);
471
	netif_tx_start_all_queues(netdev);

	return 0;
475
476bounce_map_failed:
477 kfree(adapter->bounce_buffer);
478bounce_alloc_failed:
479 i = tx_subcrqs - 1;
480 kfree(adapter->tx_pool[i].free_map);
481tx_fm_alloc_failed:
482 free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
483tx_ltb_alloc_failed:
484 kfree(adapter->tx_pool[i].tx_buff);
485tx_pool_alloc_failed:
486 for (j = 0; j < i; j++) {
487 kfree(adapter->tx_pool[j].tx_buff);
488 free_long_term_buff(adapter,
489 &adapter->tx_pool[j].long_term_buff);
490 kfree(adapter->tx_pool[j].free_map);
491 }
492 kfree(adapter->tx_pool);
493 adapter->tx_pool = NULL;
494tx_pool_arr_alloc_failed:
495 i = rxadd_subcrqs;
496rx_pool_alloc_failed:
497 for (j = 0; j < i; j++) {
498 free_rx_pool(adapter, &adapter->rx_pool[j]);
499 free_long_term_buff(adapter,
500 &adapter->rx_pool[j].long_term_buff);
501 }
502 kfree(adapter->rx_pool);
503 adapter->rx_pool = NULL;
504rx_pool_arr_alloc_failed:
505 for (i = 0; i < adapter->req_rx_queues; i++)
506 napi_enable(&adapter->napi[i]);
507alloc_napi_failed:
508 return -ENOMEM;
509}
510
511static int ibmvnic_close(struct net_device *netdev)
512{
513 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
514 struct device *dev = &adapter->vdev->dev;
515 union ibmvnic_crq crq;
516 int i;
517
518 adapter->closing = true;
519
520 for (i = 0; i < adapter->req_rx_queues; i++)
521 napi_disable(&adapter->napi[i]);
522
	netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
526 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
527 dma_unmap_single(&adapter->vdev->dev,
528 adapter->bounce_buffer_dma,
529 adapter->bounce_buffer_size,
530 DMA_BIDIRECTIONAL);
531 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
532 }
533 kfree(adapter->bounce_buffer);
534 adapter->bounce_buffer = NULL;
535 }
536
537 memset(&crq, 0, sizeof(crq));
538 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
539 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
540 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
541 ibmvnic_send_crq(adapter, &crq);
542
543 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
544 i++) {
545 kfree(adapter->tx_pool[i].tx_buff);
546 free_long_term_buff(adapter,
547 &adapter->tx_pool[i].long_term_buff);
548 kfree(adapter->tx_pool[i].free_map);
549 }
550 kfree(adapter->tx_pool);
551 adapter->tx_pool = NULL;
552
553 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
554 i++) {
555 free_rx_pool(adapter, &adapter->rx_pool[i]);
556 free_long_term_buff(adapter,
557 &adapter->rx_pool[i].long_term_buff);
558 }
559 kfree(adapter->rx_pool);
560 adapter->rx_pool = NULL;
561
562 adapter->closing = false;
563
564 return 0;
565}
566
/**
568 * build_hdr_data - creates L2/L3/L4 header data buffer
569 * @hdr_field - bitfield determining needed headers
570 * @skb - socket buffer
571 * @hdr_len - array of header lengths
572 * @tot_len - total length of data
573 *
574 * Reads hdr_field to determine which headers are needed by firmware.
575 * Builds a buffer containing these headers. Saves individual header
576 * lengths and total buffer length to be used to build descriptors.
577 */
578static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
579 int *hdr_len, u8 *hdr_data)
580{
581 int len = 0;
582 u8 *hdr;
583
584 hdr_len[0] = sizeof(struct ethhdr);
585
586 if (skb->protocol == htons(ETH_P_IP)) {
587 hdr_len[1] = ip_hdr(skb)->ihl * 4;
588 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
589 hdr_len[2] = tcp_hdrlen(skb);
590 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
591 hdr_len[2] = sizeof(struct udphdr);
592 } else if (skb->protocol == htons(ETH_P_IPV6)) {
593 hdr_len[1] = sizeof(struct ipv6hdr);
594 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
595 hdr_len[2] = tcp_hdrlen(skb);
596 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
597 hdr_len[2] = sizeof(struct udphdr);
598 }
599
600 memset(hdr_data, 0, 120);
601 if ((hdr_field >> 6) & 1) {
602 hdr = skb_mac_header(skb);
603 memcpy(hdr_data, hdr, hdr_len[0]);
604 len += hdr_len[0];
605 }
606
607 if ((hdr_field >> 5) & 1) {
608 hdr = skb_network_header(skb);
609 memcpy(hdr_data + len, hdr, hdr_len[1]);
610 len += hdr_len[1];
611 }
612
613 if ((hdr_field >> 4) & 1) {
614 hdr = skb_transport_header(skb);
615 memcpy(hdr_data + len, hdr, hdr_len[2]);
616 len += hdr_len[2];
617 }
618 return len;
619}
620
621/**
622 * create_hdr_descs - create header and header extension descriptors
623 * @hdr_field - bitfield determining needed headers
624 * @data - buffer containing header data
625 * @len - length of data buffer
626 * @hdr_len - array of individual header lengths
627 * @scrq_arr - descriptor array
628 *
629 * Creates header and, if needed, header extension descriptors and
630 * places them in a descriptor array, scrq_arr
631 */
632
633static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
634 union sub_crq *scrq_arr)
635{
636 union sub_crq hdr_desc;
637 int tmp_len = len;
638 u8 *data, *cur;
639 int tmp;
640
641 while (tmp_len > 0) {
642 cur = hdr_data + len - tmp_len;
643
644 memset(&hdr_desc, 0, sizeof(hdr_desc));
645 if (cur != hdr_data) {
646 data = hdr_desc.hdr_ext.data;
647 tmp = tmp_len > 29 ? 29 : tmp_len;
648 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
649 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
650 hdr_desc.hdr_ext.len = tmp;
651 } else {
652 data = hdr_desc.hdr.data;
653 tmp = tmp_len > 24 ? 24 : tmp_len;
654 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
655 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
656 hdr_desc.hdr.len = tmp;
657 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
658 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
659 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
660 hdr_desc.hdr.flag = hdr_field << 1;
661 }
662 memcpy(data, cur, tmp);
663 tmp_len -= tmp;
664 *scrq_arr = hdr_desc;
665 scrq_arr++;
666 }
667}
668
669/**
670 * build_hdr_descs_arr - build a header descriptor array
671 * @skb - socket buffer
672 * @num_entries - number of descriptors to be sent
673 * @subcrq - first TX descriptor
674 * @hdr_field - bit field determining which headers will be sent
675 *
676 * This function will build a TX descriptor array with applicable
677 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
678 */
679
680static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
681 int *num_entries, u8 hdr_field)
682{
683 int hdr_len[3] = {0, 0, 0};
684 int tot_len, len;
685 u8 *hdr_data = txbuff->hdr_data;
686
687 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
688 txbuff->hdr_data);
689 len = tot_len;
690 len -= 24;
691 if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
693 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
694 txbuff->indir_arr + 1);
695}
696
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
698{
699 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
700 int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
703 struct ibmvnic_tx_buff *tx_buff = NULL;
704 struct ibmvnic_tx_pool *tx_pool;
705 unsigned int tx_send_failed = 0;
706 unsigned int tx_map_failed = 0;
707 unsigned int tx_dropped = 0;
708 unsigned int tx_packets = 0;
709 unsigned int tx_bytes = 0;
710 dma_addr_t data_dma_addr;
711 struct netdev_queue *txq;
712 bool used_bounce = false;
713 unsigned long lpar_rc;
714 union sub_crq tx_crq;
715 unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
718 u64 *handle_array;
719 int index = 0;
720 int ret = 0;
721
722 tx_pool = &adapter->tx_pool[queue_num];
723 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
724 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
725 be32_to_cpu(adapter->login_rsp_buf->
726 off_txsubm_subcrqs));
727 if (adapter->migrated) {
728 tx_send_failed++;
729 tx_dropped++;
730 ret = NETDEV_TX_BUSY;
731 goto out;
732 }
733
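	/* Claim the next free slot in this queue's long term mapped buffer
	 * and copy the skb's linear data into it; the descriptor built below
	 * points the server at that slot rather than at the skb itself.
	 */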
734 index = tx_pool->free_map[tx_pool->consumer_index];
735 offset = index * adapter->req_mtu;
736 dst = tx_pool->long_term_buff.buff + offset;
737 memset(dst, 0, adapter->req_mtu);
738 skb_copy_from_linear_data(skb, dst, skb->len);
739 data_dma_addr = tx_pool->long_term_buff.addr + offset;
740
741 tx_pool->consumer_index =
742 (tx_pool->consumer_index + 1) %
743 adapter->max_tx_entries_per_subcrq;
744
745 tx_buff = &tx_pool->tx_buff[index];
746 tx_buff->skb = skb;
747 tx_buff->data_dma[0] = data_dma_addr;
748 tx_buff->data_len[0] = skb->len;
749 tx_buff->index = index;
750 tx_buff->pool_index = queue_num;
751 tx_buff->last_frag = true;
752 tx_buff->used_bounce = used_bounce;
753
754 memset(&tx_crq, 0, sizeof(tx_crq));
755 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
756 tx_crq.v1.type = IBMVNIC_TX_DESC;
757 tx_crq.v1.n_crq_elem = 1;
758 tx_crq.v1.n_sge = 1;
759 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
760 tx_crq.v1.correlator = cpu_to_be32(index);
761 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
762 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
763 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
764
765 if (adapter->vlan_header_insertion) {
766 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
767 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
768 }
769
770 if (skb->protocol == htons(ETH_P_IP)) {
771 if (ip_hdr(skb)->version == 4)
772 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
773 else if (ip_hdr(skb)->version == 6)
774 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
775
776 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
777 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
778 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
779 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
780 }
781
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
785 }
786 /* determine if l2/3/4 headers are sent to firmware */
787 if ((*hdrs >> 7) & 1 &&
788 (skb->protocol == htons(ETH_P_IP) ||
789 skb->protocol == htons(ETH_P_IPV6))) {
790 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
791 tx_crq.v1.n_crq_elem = num_entries;
792 tx_buff->indir_arr[0] = tx_crq;
793 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
794 sizeof(tx_buff->indir_arr),
795 DMA_TO_DEVICE);
796 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
797 if (!firmware_has_feature(FW_FEATURE_CMO))
798 dev_err(dev, "tx: unable to map descriptor array\n");
799 tx_map_failed++;
800 tx_dropped++;
801 ret = NETDEV_TX_BUSY;
802 goto out;
803 }
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
806 (u64)num_entries);
807 } else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
809 &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
812 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
813
814 if (tx_pool->consumer_index == 0)
815 tx_pool->consumer_index =
816 adapter->max_tx_entries_per_subcrq - 1;
817 else
818 tx_pool->consumer_index--;
819
820 tx_send_failed++;
821 tx_dropped++;
822 ret = NETDEV_TX_BUSY;
823 goto out;
824 }
825 tx_packets++;
826 tx_bytes += skb->len;
827 txq->trans_start = jiffies;
828 ret = NETDEV_TX_OK;
829
830out:
831 netdev->stats.tx_dropped += tx_dropped;
832 netdev->stats.tx_bytes += tx_bytes;
833 netdev->stats.tx_packets += tx_packets;
834 adapter->tx_send_failed += tx_send_failed;
835 adapter->tx_map_failed += tx_map_failed;
836
837 return ret;
838}
839
840static void ibmvnic_set_multi(struct net_device *netdev)
841{
842 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
843 struct netdev_hw_addr *ha;
844 union ibmvnic_crq crq;
845
846 memset(&crq, 0, sizeof(crq));
847 crq.request_capability.first = IBMVNIC_CRQ_CMD;
848 crq.request_capability.cmd = REQUEST_CAPABILITY;
849
850 if (netdev->flags & IFF_PROMISC) {
851 if (!adapter->promisc_supported)
852 return;
853 } else {
854 if (netdev->flags & IFF_ALLMULTI) {
855 /* Accept all multicast */
856 memset(&crq, 0, sizeof(crq));
857 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
858 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
859 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
860 ibmvnic_send_crq(adapter, &crq);
861 } else if (netdev_mc_empty(netdev)) {
862 /* Reject all multicast */
863 memset(&crq, 0, sizeof(crq));
864 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
865 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
866 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
867 ibmvnic_send_crq(adapter, &crq);
868 } else {
869 /* Accept one or more multicast(s) */
870 netdev_for_each_mc_addr(ha, netdev) {
871 memset(&crq, 0, sizeof(crq));
872 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
873 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
874 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
875 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
876 ha->addr);
877 ibmvnic_send_crq(adapter, &crq);
878 }
879 }
880 }
881}
882
883static int ibmvnic_set_mac(struct net_device *netdev, void *p)
884{
885 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
886 struct sockaddr *addr = p;
887 union ibmvnic_crq crq;
888
889 if (!is_valid_ether_addr(addr->sa_data))
890 return -EADDRNOTAVAIL;
891
892 memset(&crq, 0, sizeof(crq));
893 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
894 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
895 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
896 ibmvnic_send_crq(adapter, &crq);
897 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
898 return 0;
899}
900
901static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
902{
903 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
904
905 if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
906 return -EINVAL;
907
908 netdev->mtu = new_mtu;
909 return 0;
910}
911
912static void ibmvnic_tx_timeout(struct net_device *dev)
913{
914 struct ibmvnic_adapter *adapter = netdev_priv(dev);
915 int rc;
916
917 /* Adapter timed out, resetting it */
918 release_sub_crqs(adapter);
919 rc = ibmvnic_reset_crq(adapter);
920 if (rc)
921 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
922 else
923 ibmvnic_send_crq_init(adapter);
924}
925
926static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
927 struct ibmvnic_rx_buff *rx_buff)
928{
929 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
930
931 rx_buff->skb = NULL;
932
933 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
934 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
935
936 atomic_dec(&pool->available);
937}
938
939static int ibmvnic_poll(struct napi_struct *napi, int budget)
940{
941 struct net_device *netdev = napi->dev;
942 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
943 int scrq_num = (int)(napi - adapter->napi);
944 int frames_processed = 0;
945restart_poll:
946 while (frames_processed < budget) {
947 struct sk_buff *skb;
948 struct ibmvnic_rx_buff *rx_buff;
949 union sub_crq *next;
950 u32 length;
951 u16 offset;
952 u8 flags = 0;
953
954 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
955 break;
956 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
957 rx_buff =
958 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
959 rx_comp.correlator);
960 /* do error checking */
961 if (next->rx_comp.rc) {
962 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
963 /* free the entry */
964 next->rx_comp.first = 0;
965 remove_buff_from_pool(adapter, rx_buff);
966 break;
967 }
968
969 length = be32_to_cpu(next->rx_comp.len);
970 offset = be16_to_cpu(next->rx_comp.off_frame_data);
971 flags = next->rx_comp.flags;
972 skb = rx_buff->skb;
973 skb_copy_to_linear_data(skb, rx_buff->data + offset,
974 length);
975 skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
976 /* free the entry */
977 next->rx_comp.first = 0;
978 remove_buff_from_pool(adapter, rx_buff);
979
980 skb_put(skb, length);
981 skb->protocol = eth_type_trans(skb, netdev);
982
983 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
984 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
985 skb->ip_summed = CHECKSUM_UNNECESSARY;
986 }
987
988 length = skb->len;
989 napi_gro_receive(napi, skb); /* send it up */
990 netdev->stats.rx_packets++;
991 netdev->stats.rx_bytes += length;
992 frames_processed++;
993 }
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
997 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
998 napi_complete(napi);
999 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1000 napi_reschedule(napi)) {
1001 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1002 goto restart_poll;
1003 }
1004 }
1005 return frames_processed;
1006}
1007
1008#ifdef CONFIG_NET_POLL_CONTROLLER
1009static void ibmvnic_netpoll_controller(struct net_device *dev)
1010{
1011 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1012 int i;
1013
1014 replenish_pools(netdev_priv(dev));
1015 for (i = 0; i < adapter->req_rx_queues; i++)
1016 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1017 adapter->rx_scrq[i]);
1018}
1019#endif
1020
1021static const struct net_device_ops ibmvnic_netdev_ops = {
1022 .ndo_open = ibmvnic_open,
1023 .ndo_stop = ibmvnic_close,
1024 .ndo_start_xmit = ibmvnic_xmit,
1025 .ndo_set_rx_mode = ibmvnic_set_multi,
1026 .ndo_set_mac_address = ibmvnic_set_mac,
1027 .ndo_validate_addr = eth_validate_addr,
1028 .ndo_change_mtu = ibmvnic_change_mtu,
1029 .ndo_tx_timeout = ibmvnic_tx_timeout,
1030#ifdef CONFIG_NET_POLL_CONTROLLER
1031 .ndo_poll_controller = ibmvnic_netpoll_controller,
1032#endif
1033};
1034
1035/* ethtool functions */
1036
1037static int ibmvnic_get_settings(struct net_device *netdev,
1038 struct ethtool_cmd *cmd)
1039{
1040 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1041 SUPPORTED_FIBRE);
1042 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1043 ADVERTISED_FIBRE);
1044 ethtool_cmd_speed_set(cmd, SPEED_1000);
1045 cmd->duplex = DUPLEX_FULL;
1046 cmd->port = PORT_FIBRE;
1047 cmd->phy_address = 0;
1048 cmd->transceiver = XCVR_INTERNAL;
1049 cmd->autoneg = AUTONEG_ENABLE;
1050 cmd->maxtxpkt = 0;
1051 cmd->maxrxpkt = 1;
1052 return 0;
1053}
1054
1055static void ibmvnic_get_drvinfo(struct net_device *dev,
1056 struct ethtool_drvinfo *info)
1057{
1058 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1059 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1060}
1061
1062static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1063{
1064 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1065
1066 return adapter->msg_enable;
1067}
1068
1069static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1070{
1071 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1072
1073 adapter->msg_enable = data;
1074}
1075
1076static u32 ibmvnic_get_link(struct net_device *netdev)
1077{
1078 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1079
1080 /* Don't need to send a query because we request a logical link up at
1081 * init and then we wait for link state indications
1082 */
1083 return adapter->logical_link_state;
1084}
1085
1086static void ibmvnic_get_ringparam(struct net_device *netdev,
1087 struct ethtool_ringparam *ring)
1088{
1089 ring->rx_max_pending = 0;
1090 ring->tx_max_pending = 0;
1091 ring->rx_mini_max_pending = 0;
1092 ring->rx_jumbo_max_pending = 0;
1093 ring->rx_pending = 0;
1094 ring->tx_pending = 0;
1095 ring->rx_mini_pending = 0;
1096 ring->rx_jumbo_pending = 0;
1097}
1098
1099static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1100{
1101 int i;
1102
1103 if (stringset != ETH_SS_STATS)
1104 return;
1105
1106 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1107 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1108}
1109
1110static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1111{
1112 switch (sset) {
1113 case ETH_SS_STATS:
1114 return ARRAY_SIZE(ibmvnic_stats);
1115 default:
1116 return -EOPNOTSUPP;
1117 }
1118}
1119
1120static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1121 struct ethtool_stats *stats, u64 *data)
1122{
1123 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1124 union ibmvnic_crq crq;
1125 int i;
1126
1127 memset(&crq, 0, sizeof(crq));
1128 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1129 crq.request_statistics.cmd = REQUEST_STATISTICS;
1130 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1131 crq.request_statistics.len =
1132 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1133 ibmvnic_send_crq(adapter, &crq);
1134
1135 /* Wait for data to be written */
1136 init_completion(&adapter->stats_done);
1137 wait_for_completion(&adapter->stats_done);
1138
1139 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1140 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1141}
1142
1143static const struct ethtool_ops ibmvnic_ethtool_ops = {
1144 .get_settings = ibmvnic_get_settings,
1145 .get_drvinfo = ibmvnic_get_drvinfo,
1146 .get_msglevel = ibmvnic_get_msglevel,
1147 .set_msglevel = ibmvnic_set_msglevel,
1148 .get_link = ibmvnic_get_link,
1149 .get_ringparam = ibmvnic_get_ringparam,
1150 .get_strings = ibmvnic_get_strings,
1151 .get_sset_count = ibmvnic_get_sset_count,
1152 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
1153};
1154
1155/* Routines for managing CRQs/sCRQs */
1156
1157static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1158 struct ibmvnic_sub_crq_queue *scrq)
1159{
1160 struct device *dev = &adapter->vdev->dev;
1161 long rc;
1162
1163 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1164
1165 /* Close the sub-crqs */
1166 do {
1167 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1168 adapter->vdev->unit_address,
1169 scrq->crq_num);
1170 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1171
1172 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1173 DMA_BIDIRECTIONAL);
1174 free_pages((unsigned long)scrq->msgs, 2);
1175 kfree(scrq);
1176}
1177
1178static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1179 *adapter)
1180{
1181 struct device *dev = &adapter->vdev->dev;
1182 struct ibmvnic_sub_crq_queue *scrq;
1183 int rc;
1184
1185 scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1186 if (!scrq)
1187 return NULL;
1188
1189 scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
1190 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1191 if (!scrq->msgs) {
1192 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1193 goto zero_page_failed;
1194 }
1195
1196 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1197 DMA_BIDIRECTIONAL);
1198 if (dma_mapping_error(dev, scrq->msg_token)) {
1199 dev_warn(dev, "Couldn't map crq queue messages page\n");
1200 goto map_failed;
1201 }
1202
1203 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1204 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1205
1206 if (rc == H_RESOURCE)
1207 rc = ibmvnic_reset_crq(adapter);
1208
1209 if (rc == H_CLOSED) {
1210 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1211 } else if (rc) {
1212 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1213 goto reg_failed;
1214 }
1215
1216 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1217 if (scrq->irq == NO_IRQ) {
1218 dev_err(dev, "Error mapping irq\n");
1219 goto map_irq_failed;
1220 }
1221
1222 scrq->adapter = adapter;
1223 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1224 scrq->cur = 0;
1225 scrq->rx_skb_top = NULL;
1226 spin_lock_init(&scrq->lock);
1227
1228 netdev_dbg(adapter->netdev,
1229 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1230 scrq->crq_num, scrq->hw_irq, scrq->irq);
1231
1232 return scrq;
1233
1234map_irq_failed:
1235 do {
1236 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1237 adapter->vdev->unit_address,
1238 scrq->crq_num);
1239 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1240reg_failed:
1241 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1242 DMA_BIDIRECTIONAL);
1243map_failed:
1244 free_pages((unsigned long)scrq->msgs, 2);
1245zero_page_failed:
1246 kfree(scrq);
1247
1248 return NULL;
1249}
1250
1251static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1252{
1253 int i;
1254
1255 if (adapter->tx_scrq) {
1256 for (i = 0; i < adapter->req_tx_queues; i++)
1257 if (adapter->tx_scrq[i]) {
1258 free_irq(adapter->tx_scrq[i]->irq,
1259 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
1262 adapter->tx_scrq[i]);
1263 }
1264 adapter->tx_scrq = NULL;
1265 }
1266
1267 if (adapter->rx_scrq) {
1268 for (i = 0; i < adapter->req_rx_queues; i++)
1269 if (adapter->rx_scrq[i]) {
1270 free_irq(adapter->rx_scrq[i]->irq,
1271 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
1274 adapter->rx_scrq[i]);
1275 }
1276 adapter->rx_scrq = NULL;
1277 }
1278
1279 adapter->requested_caps = 0;
1280}
1281
1282static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1283 struct ibmvnic_sub_crq_queue *scrq)
1284{
1285 struct device *dev = &adapter->vdev->dev;
1286 unsigned long rc;
1287
1288 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1289 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1290 if (rc)
1291 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1292 scrq->hw_irq, rc);
1293 return rc;
1294}
1295
1296static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1297 struct ibmvnic_sub_crq_queue *scrq)
1298{
1299 struct device *dev = &adapter->vdev->dev;
1300 unsigned long rc;
1301
1302 if (scrq->hw_irq > 0x100000000ULL) {
1303 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1304 return 1;
1305 }
1306
1307 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1308 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1309 if (rc)
1310 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1311 scrq->hw_irq, rc);
1312 return rc;
1313}
1314
1315static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1316 struct ibmvnic_sub_crq_queue *scrq)
1317{
1318 struct device *dev = &adapter->vdev->dev;
1319 struct ibmvnic_tx_buff *txbuff;
1320 union sub_crq *next;
1321 int index;
1322 int i, j;
	u8 first;

restart_loop:
1326 while (pending_scrq(adapter, scrq)) {
1327 unsigned int pool = scrq->pool_index;
1328
1329 next = ibmvnic_next_scrq(adapter, scrq);
1330 for (i = 0; i < next->tx_comp.num_comps; i++) {
1331 if (next->tx_comp.rcs[i]) {
1332 dev_err(dev, "tx error %x\n",
1333 next->tx_comp.rcs[i]);
1334 continue;
1335 }
1336 index = be32_to_cpu(next->tx_comp.correlators[i]);
1337 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1338
1339 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1340 if (!txbuff->data_dma[j])
1341 continue;
1342
1343 txbuff->data_dma[j] = 0;
1344 txbuff->used_bounce = false;
1345 }
			/* if sub_crq was sent indirectly */
1347 first = txbuff->indir_arr[0].generic.first;
1348 if (first == IBMVNIC_CRQ_CMD) {
1349 dma_unmap_single(dev, txbuff->indir_dma,
1350 sizeof(txbuff->indir_arr),
1351 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
1355 dev_kfree_skb_any(txbuff->skb);
1356
1357 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1358 producer_index] = index;
1359 adapter->tx_pool[pool].producer_index =
1360 (adapter->tx_pool[pool].producer_index + 1) %
1361 adapter->max_tx_entries_per_subcrq;
1362 }
1363 /* remove tx_comp scrq*/
1364 next->tx_comp.first = 0;
1365 }
1366
1367 enable_scrq_irq(adapter, scrq);
1368
1369 if (pending_scrq(adapter, scrq)) {
1370 disable_scrq_irq(adapter, scrq);
1371 goto restart_loop;
1372 }
1373
1374 return 0;
1375}
1376
1377static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1378{
1379 struct ibmvnic_sub_crq_queue *scrq = instance;
1380 struct ibmvnic_adapter *adapter = scrq->adapter;
1381
1382 disable_scrq_irq(adapter, scrq);
1383 ibmvnic_complete_tx(adapter, scrq);
1384
1385 return IRQ_HANDLED;
1386}
1387
1388static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1389{
1390 struct ibmvnic_sub_crq_queue *scrq = instance;
1391 struct ibmvnic_adapter *adapter = scrq->adapter;
1392
1393 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1394 disable_scrq_irq(adapter, scrq);
1395 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1396 }
1397
1398 return IRQ_HANDLED;
1399}
1400
1401static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1402{
1403 struct device *dev = &adapter->vdev->dev;
1404 struct ibmvnic_sub_crq_queue **allqueues;
1405 int registered_queues = 0;
1406 union ibmvnic_crq crq;
1407 int total_queues;
1408 int more = 0;
1409 int i, j;
1410 int rc;
1411
1412 if (!retry) {
1413 /* Sub-CRQ entries are 32 byte long */
1414 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1415
1416 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1417 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1418 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1419 goto allqueues_failed;
1420 }
1421
1422 /* Get the minimum between the queried max and the entries
1423 * that fit in our PAGE_SIZE
1424 */
1425 adapter->req_tx_entries_per_subcrq =
1426 adapter->max_tx_entries_per_subcrq > entries_page ?
1427 entries_page : adapter->max_tx_entries_per_subcrq;
1428 adapter->req_rx_add_entries_per_subcrq =
1429 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1430 entries_page : adapter->max_rx_add_entries_per_subcrq;
1431
1432 /* Choosing the maximum number of queues supported by firmware*/
		adapter->req_tx_queues = adapter->max_tx_queues;
1434 adapter->req_rx_queues = adapter->max_rx_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
1438 }
1439
1440 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1441
1442 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1443 if (!allqueues)
1444 goto allqueues_failed;
1445
1446 for (i = 0; i < total_queues; i++) {
1447 allqueues[i] = init_sub_crq_queue(adapter);
1448 if (!allqueues[i]) {
1449 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1450 break;
1451 }
1452 registered_queues++;
1453 }
1454
1455 /* Make sure we were able to register the minimum number of queues */
1456 if (registered_queues <
1457 adapter->min_tx_queues + adapter->min_rx_queues) {
1458 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1459 goto tx_failed;
1460 }
1461
1462 /* Distribute the failed allocated queues*/
1463 for (i = 0; i < total_queues - registered_queues + more ; i++) {
1464 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1465 switch (i % 3) {
1466 case 0:
1467 if (adapter->req_rx_queues > adapter->min_rx_queues)
1468 adapter->req_rx_queues--;
1469 else
1470 more++;
1471 break;
1472 case 1:
1473 if (adapter->req_tx_queues > adapter->min_tx_queues)
1474 adapter->req_tx_queues--;
1475 else
1476 more++;
1477 break;
1478 }
1479 }
1480
1481 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1482 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1483 if (!adapter->tx_scrq)
1484 goto tx_failed;
1485
1486 for (i = 0; i < adapter->req_tx_queues; i++) {
1487 adapter->tx_scrq[i] = allqueues[i];
1488 adapter->tx_scrq[i]->pool_index = i;
1489 rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
1490 0, "ibmvnic_tx", adapter->tx_scrq[i]);
1491 if (rc) {
1492 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1493 adapter->tx_scrq[i]->irq, rc);
1494 goto req_tx_irq_failed;
1495 }
1496 }
1497
1498 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1499 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1500 if (!adapter->rx_scrq)
1501 goto rx_failed;
1502
1503 for (i = 0; i < adapter->req_rx_queues; i++) {
1504 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1505 adapter->rx_scrq[i]->scrq_num = i;
1506 rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
1507 0, "ibmvnic_rx", adapter->rx_scrq[i]);
1508 if (rc) {
1509 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1510 adapter->rx_scrq[i]->irq, rc);
1511 goto req_rx_irq_failed;
1512 }
1513 }
1514
1515 memset(&crq, 0, sizeof(crq));
1516 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1517 crq.request_capability.cmd = REQUEST_CAPABILITY;
1518
1519 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);
1522
1523 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);
1526
1527 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);
1530
1531 crq.request_capability.capability =
1532 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1533 crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);
1536
1537 crq.request_capability.capability =
1538 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1539 crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);
1542
1543 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);
1546
1547 if (adapter->netdev->flags & IFF_PROMISC) {
1548 if (adapter->promisc_supported) {
1549 crq.request_capability.capability =
1550 cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
1553 }
1554 } else {
1555 crq.request_capability.capability =
1556 cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
1559 }
1560
1561 kfree(allqueues);
1562
1563 return;
1564
1565req_rx_irq_failed:
1566 for (j = 0; j < i; j++)
1567 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1568 i = adapter->req_tx_queues;
1569req_tx_irq_failed:
1570 for (j = 0; j < i; j++)
1571 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1572 kfree(adapter->rx_scrq);
1573 adapter->rx_scrq = NULL;
1574rx_failed:
1575 kfree(adapter->tx_scrq);
1576 adapter->tx_scrq = NULL;
1577tx_failed:
1578 for (i = 0; i < registered_queues; i++)
1579 release_sub_crq_queue(adapter, allqueues[i]);
1580 kfree(allqueues);
1581allqueues_failed:
1582 ibmvnic_remove(adapter->vdev);
1583}
1584
1585static int pending_scrq(struct ibmvnic_adapter *adapter,
1586 struct ibmvnic_sub_crq_queue *scrq)
1587{
1588 union sub_crq *entry = &scrq->msgs[scrq->cur];
1589
1590 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1591 return 1;
1592 else
1593 return 0;
1594}
1595
1596static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1597 struct ibmvnic_sub_crq_queue *scrq)
1598{
1599 union sub_crq *entry;
1600 unsigned long flags;
1601
1602 spin_lock_irqsave(&scrq->lock, flags);
1603 entry = &scrq->msgs[scrq->cur];
1604 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1605 if (++scrq->cur == scrq->size)
1606 scrq->cur = 0;
1607 } else {
1608 entry = NULL;
1609 }
1610 spin_unlock_irqrestore(&scrq->lock, flags);
1611
1612 return entry;
1613}
1614
1615static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1616{
1617 struct ibmvnic_crq_queue *queue = &adapter->crq;
1618 union ibmvnic_crq *crq;
1619
1620 crq = &queue->msgs[queue->cur];
1621 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1622 if (++queue->cur == queue->size)
1623 queue->cur = 0;
1624 } else {
1625 crq = NULL;
1626 }
1627
1628 return crq;
1629}
1630
1631static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1632 union sub_crq *sub_crq)
1633{
1634 unsigned int ua = adapter->vdev->unit_address;
1635 struct device *dev = &adapter->vdev->dev;
1636 u64 *u64_crq = (u64 *)sub_crq;
1637 int rc;
1638
1639 netdev_dbg(adapter->netdev,
1640 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1641 (unsigned long int)cpu_to_be64(remote_handle),
1642 (unsigned long int)cpu_to_be64(u64_crq[0]),
1643 (unsigned long int)cpu_to_be64(u64_crq[1]),
1644 (unsigned long int)cpu_to_be64(u64_crq[2]),
1645 (unsigned long int)cpu_to_be64(u64_crq[3]));
1646
1647 /* Make sure the hypervisor sees the complete request */
1648 mb();
1649
1650 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1651 cpu_to_be64(remote_handle),
1652 cpu_to_be64(u64_crq[0]),
1653 cpu_to_be64(u64_crq[1]),
1654 cpu_to_be64(u64_crq[2]),
1655 cpu_to_be64(u64_crq[3]));
1656
1657 if (rc) {
1658 if (rc == H_CLOSED)
1659 dev_warn(dev, "CRQ Queue closed\n");
1660 dev_err(dev, "Send error (rc=%d)\n", rc);
1661 }
1662
1663 return rc;
1664}
1665
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1667 u64 remote_handle, u64 ioba, u64 num_entries)
1668{
1669 unsigned int ua = adapter->vdev->unit_address;
1670 struct device *dev = &adapter->vdev->dev;
1671 int rc;
1672
1673 /* Make sure the hypervisor sees the complete request */
1674 mb();
1675 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1676 cpu_to_be64(remote_handle),
1677 ioba, num_entries);
1678
1679 if (rc) {
1680 if (rc == H_CLOSED)
1681 dev_warn(dev, "CRQ Queue closed\n");
1682 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1683 }
1684
1685 return rc;
1686}
1687
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1689 union ibmvnic_crq *crq)
1690{
1691 unsigned int ua = adapter->vdev->unit_address;
1692 struct device *dev = &adapter->vdev->dev;
1693 u64 *u64_crq = (u64 *)crq;
1694 int rc;
1695
1696 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1697 (unsigned long int)cpu_to_be64(u64_crq[0]),
1698 (unsigned long int)cpu_to_be64(u64_crq[1]));
1699
1700 /* Make sure the hypervisor sees the complete request */
1701 mb();
1702
1703 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1704 cpu_to_be64(u64_crq[0]),
1705 cpu_to_be64(u64_crq[1]));
1706
1707 if (rc) {
1708 if (rc == H_CLOSED)
1709 dev_warn(dev, "CRQ Queue closed\n");
1710 dev_warn(dev, "Send error (rc=%d)\n", rc);
1711 }
1712
1713 return rc;
1714}
1715
1716static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1717{
1718 union ibmvnic_crq crq;
1719
1720 memset(&crq, 0, sizeof(crq));
1721 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1722 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1723 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1724
1725 return ibmvnic_send_crq(adapter, &crq);
1726}
1727
1728static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1729{
1730 union ibmvnic_crq crq;
1731
1732 memset(&crq, 0, sizeof(crq));
1733 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1734 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1735 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1736
1737 return ibmvnic_send_crq(adapter, &crq);
1738}
1739
1740static int send_version_xchg(struct ibmvnic_adapter *adapter)
1741{
1742 union ibmvnic_crq crq;
1743
1744 memset(&crq, 0, sizeof(crq));
1745 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1746 crq.version_exchange.cmd = VERSION_EXCHANGE;
1747 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1748
1749 return ibmvnic_send_crq(adapter, &crq);
1750}
1751
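/* Build and send the LOGIN request.  The login buffer carries the sub-CRQ
 * handles for every requested tx/rx queue; the response buffer is DMA
 * mapped so the server can fill it in.  The request is also queued on the
 * in-flight list so it can be cleaned up if the adapter is reset before a
 * response arrives.
 */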
1752static void send_login(struct ibmvnic_adapter *adapter)
1753{
1754 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1755 struct ibmvnic_login_buffer *login_buffer;
1756 struct ibmvnic_inflight_cmd *inflight_cmd;
1757 struct device *dev = &adapter->vdev->dev;
1758 dma_addr_t rsp_buffer_token;
1759 dma_addr_t buffer_token;
1760 size_t rsp_buffer_size;
1761 union ibmvnic_crq crq;
1762 unsigned long flags;
1763 size_t buffer_size;
1764 __be64 *tx_list_p;
1765 __be64 *rx_list_p;
1766 int i;
1767
1768 buffer_size =
1769 sizeof(struct ibmvnic_login_buffer) +
1770 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1771
1772 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1773 if (!login_buffer)
1774 goto buf_alloc_failed;
1775
1776 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1777 DMA_TO_DEVICE);
1778 if (dma_mapping_error(dev, buffer_token)) {
1779 dev_err(dev, "Couldn't map login buffer\n");
1780 goto buf_map_failed;
1781 }
1782
1783	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1784 sizeof(u64) * adapter->req_tx_queues +
1785 sizeof(u64) * adapter->req_rx_queues +
1786 sizeof(u64) * adapter->req_rx_queues +
1787 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1788
1789 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1790 if (!login_rsp_buffer)
1791 goto buf_rsp_alloc_failed;
1792
1793 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1794 rsp_buffer_size, DMA_FROM_DEVICE);
1795 if (dma_mapping_error(dev, rsp_buffer_token)) {
1796 dev_err(dev, "Couldn't map login rsp buffer\n");
1797 goto buf_rsp_map_failed;
1798 }
1799 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1800 if (!inflight_cmd) {
1801 dev_err(dev, "Couldn't allocate inflight_cmd\n");
1802 goto inflight_alloc_failed;
1803 }
1804 adapter->login_buf = login_buffer;
1805 adapter->login_buf_token = buffer_token;
1806 adapter->login_buf_sz = buffer_size;
1807 adapter->login_rsp_buf = login_rsp_buffer;
1808 adapter->login_rsp_buf_token = rsp_buffer_token;
1809 adapter->login_rsp_buf_sz = rsp_buffer_size;
1810
1811 login_buffer->len = cpu_to_be32(buffer_size);
1812 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1813 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1814 login_buffer->off_txcomp_subcrqs =
1815 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1816 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1817 login_buffer->off_rxcomp_subcrqs =
1818 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1819 sizeof(u64) * adapter->req_tx_queues);
1820 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1821 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1822
1823 tx_list_p = (__be64 *)((char *)login_buffer +
1824 sizeof(struct ibmvnic_login_buffer));
1825 rx_list_p = (__be64 *)((char *)login_buffer +
1826 sizeof(struct ibmvnic_login_buffer) +
1827 sizeof(u64) * adapter->req_tx_queues);
1828
1829 for (i = 0; i < adapter->req_tx_queues; i++) {
1830 if (adapter->tx_scrq[i]) {
1831 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1832 crq_num);
1833 }
1834 }
1835
1836 for (i = 0; i < adapter->req_rx_queues; i++) {
1837 if (adapter->rx_scrq[i]) {
1838 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1839 crq_num);
1840 }
1841 }
1842
1843 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1844 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1845 netdev_dbg(adapter->netdev, "%016lx\n",
1846 ((unsigned long int *)(adapter->login_buf))[i]);
1847 }
1848
1849 memset(&crq, 0, sizeof(crq));
1850 crq.login.first = IBMVNIC_CRQ_CMD;
1851 crq.login.cmd = LOGIN;
1852 crq.login.ioba = cpu_to_be32(buffer_token);
1853 crq.login.len = cpu_to_be32(buffer_size);
1854
1855 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1856
1857 spin_lock_irqsave(&adapter->inflight_lock, flags);
1858 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1859 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1860
1861 ibmvnic_send_crq(adapter, &crq);
1862
1863 return;
1864
1865inflight_alloc_failed:
1866 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1867 DMA_FROM_DEVICE);
1868buf_rsp_map_failed:
1869 kfree(login_rsp_buffer);
1870buf_rsp_alloc_failed:
1871 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1872buf_map_failed:
1873 kfree(login_buffer);
1874buf_alloc_failed:
1875 return;
1876}
1877
1878static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1879 u32 len, u8 map_id)
1880{
1881 union ibmvnic_crq crq;
1882
1883 memset(&crq, 0, sizeof(crq));
1884 crq.request_map.first = IBMVNIC_CRQ_CMD;
1885 crq.request_map.cmd = REQUEST_MAP;
1886 crq.request_map.map_id = map_id;
1887 crq.request_map.ioba = cpu_to_be32(addr);
1888 crq.request_map.len = cpu_to_be32(len);
1889 ibmvnic_send_crq(adapter, &crq);
1890}
1891
1892static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1893{
1894 union ibmvnic_crq crq;
1895
1896 memset(&crq, 0, sizeof(crq));
1897 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1898 crq.request_unmap.cmd = REQUEST_UNMAP;
1899 crq.request_unmap.map_id = map_id;
1900 ibmvnic_send_crq(adapter, &crq);
1901}
1902
1903static void send_map_query(struct ibmvnic_adapter *adapter)
1904{
1905 union ibmvnic_crq crq;
1906
1907 memset(&crq, 0, sizeof(crq));
1908 crq.query_map.first = IBMVNIC_CRQ_CMD;
1909 crq.query_map.cmd = QUERY_MAP;
1910 ibmvnic_send_crq(adapter, &crq);
1911}
1912
1913/* Send a series of CRQs requesting various capabilities of the VNIC server */
1914static void send_cap_queries(struct ibmvnic_adapter *adapter)
1915{
1916 union ibmvnic_crq crq;
1917
1918 atomic_set(&adapter->running_cap_queries, 0);
1919 memset(&crq, 0, sizeof(crq));
1920 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1921 crq.query_capability.cmd = QUERY_CAPABILITY;
1922
1923 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1924 atomic_inc(&adapter->running_cap_queries);
1925 ibmvnic_send_crq(adapter, &crq);
1926
1927 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1928 atomic_inc(&adapter->running_cap_queries);
1929 ibmvnic_send_crq(adapter, &crq);
1930
1931 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1932 atomic_inc(&adapter->running_cap_queries);
1933 ibmvnic_send_crq(adapter, &crq);
1934
1935 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1936 atomic_inc(&adapter->running_cap_queries);
1937 ibmvnic_send_crq(adapter, &crq);
1938
1939 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1940 atomic_inc(&adapter->running_cap_queries);
1941 ibmvnic_send_crq(adapter, &crq);
1942
1943 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1944 atomic_inc(&adapter->running_cap_queries);
1945 ibmvnic_send_crq(adapter, &crq);
1946
1947 crq.query_capability.capability =
1948 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
1949 atomic_inc(&adapter->running_cap_queries);
1950 ibmvnic_send_crq(adapter, &crq);
1951
1952 crq.query_capability.capability =
1953 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
1954 atomic_inc(&adapter->running_cap_queries);
1955 ibmvnic_send_crq(adapter, &crq);
1956
1957 crq.query_capability.capability =
1958 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
1959 atomic_inc(&adapter->running_cap_queries);
1960 ibmvnic_send_crq(adapter, &crq);
1961
1962 crq.query_capability.capability =
1963 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
1964 atomic_inc(&adapter->running_cap_queries);
1965 ibmvnic_send_crq(adapter, &crq);
1966
1967 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
1968 atomic_inc(&adapter->running_cap_queries);
1969 ibmvnic_send_crq(adapter, &crq);
1970
1971 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
1972 atomic_inc(&adapter->running_cap_queries);
1973 ibmvnic_send_crq(adapter, &crq);
1974
1975 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
1976 atomic_inc(&adapter->running_cap_queries);
1977 ibmvnic_send_crq(adapter, &crq);
1978
1979 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
1980 atomic_inc(&adapter->running_cap_queries);
1981 ibmvnic_send_crq(adapter, &crq);
1982
1983 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
1984 atomic_inc(&adapter->running_cap_queries);
1985 ibmvnic_send_crq(adapter, &crq);
1986
1987 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
1988 atomic_inc(&adapter->running_cap_queries);
1989 ibmvnic_send_crq(adapter, &crq);
1990
1991 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
1992 atomic_inc(&adapter->running_cap_queries);
1993 ibmvnic_send_crq(adapter, &crq);
1994
1995 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
1996 atomic_inc(&adapter->running_cap_queries);
1997 ibmvnic_send_crq(adapter, &crq);
1998
1999 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2000 atomic_inc(&adapter->running_cap_queries);
2001 ibmvnic_send_crq(adapter, &crq);
2002
2003 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2004 atomic_inc(&adapter->running_cap_queries);
2005 ibmvnic_send_crq(adapter, &crq);
2006
2007 crq.query_capability.capability =
2008 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2009 atomic_inc(&adapter->running_cap_queries);
2010 ibmvnic_send_crq(adapter, &crq);
2011
2012 crq.query_capability.capability =
2013 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2014 atomic_inc(&adapter->running_cap_queries);
2015 ibmvnic_send_crq(adapter, &crq);
2016
2017 crq.query_capability.capability =
2018 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2019 atomic_inc(&adapter->running_cap_queries);
2020 ibmvnic_send_crq(adapter, &crq);
2021
2022 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2023 atomic_inc(&adapter->running_cap_queries);
2024 ibmvnic_send_crq(adapter, &crq);
2025}
2026
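/* Process the QUERY_IP_OFFLOAD response: log the server's checksum and
 * large send/receive capabilities, fill in the ip_offload control buffer
 * with the offloads we will use, update netdev->features accordingly, and
 * send CONTROL_IP_OFFLOAD back to the server.
 */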
2027static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2028{
2029 struct device *dev = &adapter->vdev->dev;
2030 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2031 union ibmvnic_crq crq;
2032 int i;
2033
2034 dma_unmap_single(dev, adapter->ip_offload_tok,
2035 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2036
2037 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2038 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2039 netdev_dbg(adapter->netdev, "%016lx\n",
2040 ((unsigned long int *)(buf))[i]);
2041
2042 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2043 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2044 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2045 buf->tcp_ipv4_chksum);
2046 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2047 buf->tcp_ipv6_chksum);
2048 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2049 buf->udp_ipv4_chksum);
2050 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2051 buf->udp_ipv6_chksum);
2052 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2053 buf->large_tx_ipv4);
2054 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2055 buf->large_tx_ipv6);
2056 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2057 buf->large_rx_ipv4);
2058 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2059 buf->large_rx_ipv6);
2060 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2061 buf->max_ipv4_header_size);
2062 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2063 buf->max_ipv6_header_size);
2064 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2065 buf->max_tcp_header_size);
2066 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2067 buf->max_udp_header_size);
2068 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2069 buf->max_large_tx_size);
2070 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2071 buf->max_large_rx_size);
2072 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2073 buf->ipv6_extension_header);
2074 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2075 buf->tcp_pseudosum_req);
2076 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2077 buf->num_ipv6_ext_headers);
2078 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2079 buf->off_ipv6_ext_headers);
2080
2081 adapter->ip_offload_ctrl_tok =
2082 dma_map_single(dev, &adapter->ip_offload_ctrl,
2083 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2084
2085 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2086 dev_err(dev, "Couldn't map ip offload control buffer\n");
2087 return;
2088 }
2089
2090 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2091 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2092 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2093 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2094 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2095
2096 /* large_tx/rx disabled for now, additional features needed */
2097 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2098 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2099 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2100 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2101
2102 adapter->netdev->features = NETIF_F_GSO;
2103
2104 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2105 adapter->netdev->features |= NETIF_F_IP_CSUM;
2106
2107 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2108 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2109
2110	if ((adapter->netdev->features &
2111 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2112 adapter->netdev->features |= NETIF_F_RXCSUM;
2113
2114	memset(&crq, 0, sizeof(crq));
2115 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2116 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2117 crq.control_ip_offload.len =
2118 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2119 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2120 ibmvnic_send_crq(adapter, &crq);
2121}
2122
2123static void handle_error_info_rsp(union ibmvnic_crq *crq,
2124 struct ibmvnic_adapter *adapter)
2125{
2126 struct device *dev = &adapter->vdev->dev;
2127	struct ibmvnic_error_buff *error_buff, *tmp;
2128	unsigned long flags;
2129 bool found = false;
2130 int i;
2131
2132	if (crq->request_error_rsp.rc.code) {
2133 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2134 crq->request_error_rsp.rc.code);
2135 return;
2136 }
2137
2138 spin_lock_irqsave(&adapter->error_list_lock, flags);
2139	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2140		if (error_buff->error_id == crq->request_error_rsp.error_id) {
2141 found = true;
2142 list_del(&error_buff->list);
2143 break;
2144 }
2145 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2146
2147 if (!found) {
2148 dev_err(dev, "Couldn't find error id %x\n",
2149 crq->request_error_rsp.error_id);
2150 return;
2151 }
2152
2153 dev_err(dev, "Detailed info for error id %x:",
2154 crq->request_error_rsp.error_id);
2155
2156 for (i = 0; i < error_buff->len; i++) {
2157 pr_cont("%02x", (int)error_buff->buff[i]);
2158 if (i % 8 == 7)
2159 pr_cont(" ");
2160 }
2161 pr_cont("\n");
2162
2163 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2164 DMA_FROM_DEVICE);
2165 kfree(error_buff->buff);
2166 kfree(error_buff);
2167}
2168
2169static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2170 struct ibmvnic_adapter *adapter)
2171{
2172 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2173 struct ibmvnic_inflight_cmd *inflight_cmd;
2174 struct device *dev = &adapter->vdev->dev;
2175 union ibmvnic_crq newcrq;
2176 unsigned long flags;
2177
2178 /* allocate and map buffer */
2179 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2180 if (!adapter->dump_data) {
2181 complete(&adapter->fw_done);
2182 return;
2183 }
2184
2185 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2186 DMA_FROM_DEVICE);
2187
2188 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2189 if (!firmware_has_feature(FW_FEATURE_CMO))
2190 dev_err(dev, "Couldn't map dump data\n");
2191 kfree(adapter->dump_data);
2192 complete(&adapter->fw_done);
2193 return;
2194 }
2195
2196 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2197 if (!inflight_cmd) {
2198 dma_unmap_single(dev, adapter->dump_data_token, len,
2199 DMA_FROM_DEVICE);
2200 kfree(adapter->dump_data);
2201 complete(&adapter->fw_done);
2202 return;
2203 }
2204
2205 memset(&newcrq, 0, sizeof(newcrq));
2206 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2207 newcrq.request_dump.cmd = REQUEST_DUMP;
2208 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2209 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2210
2211 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2212
2213 spin_lock_irqsave(&adapter->inflight_lock, flags);
2214 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2215 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2216
2217 ibmvnic_send_crq(adapter, &newcrq);
2218}
2219
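/* An ERROR_INDICATION CRQ tells us the firmware has detailed error data
 * available.  Allocate and map a buffer of the advertised size, remember it
 * on the error list, and ask for the details with REQUEST_ERROR_INFO.
 */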
2220static void handle_error_indication(union ibmvnic_crq *crq,
2221 struct ibmvnic_adapter *adapter)
2222{
2223 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2224 struct ibmvnic_inflight_cmd *inflight_cmd;
2225 struct device *dev = &adapter->vdev->dev;
2226 struct ibmvnic_error_buff *error_buff;
2227 union ibmvnic_crq new_crq;
2228 unsigned long flags;
2229
2230 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2231 crq->error_indication.
2232 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2233 crq->error_indication.error_id,
2234 crq->error_indication.error_cause);
2235
2236 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2237 if (!error_buff)
2238 return;
2239
2240 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2241 if (!error_buff->buff) {
2242 kfree(error_buff);
2243 return;
2244 }
2245
2246 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2247 DMA_FROM_DEVICE);
2248 if (dma_mapping_error(dev, error_buff->dma)) {
2249 if (!firmware_has_feature(FW_FEATURE_CMO))
2250 dev_err(dev, "Couldn't map error buffer\n");
2251 kfree(error_buff->buff);
2252 kfree(error_buff);
2253 return;
2254 }
2255
2256 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2257 if (!inflight_cmd) {
2258 dma_unmap_single(dev, error_buff->dma, detail_len,
2259 DMA_FROM_DEVICE);
2260 kfree(error_buff->buff);
2261 kfree(error_buff);
2262 return;
2263 }
2264
2265 error_buff->len = detail_len;
2266 error_buff->error_id = crq->error_indication.error_id;
2267
2268 spin_lock_irqsave(&adapter->error_list_lock, flags);
2269 list_add_tail(&error_buff->list, &adapter->errors);
2270 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2271
2272 memset(&new_crq, 0, sizeof(new_crq));
2273 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2274 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2275 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2276 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2277 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2278
2279	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2280
2281 spin_lock_irqsave(&adapter->inflight_lock, flags);
2282 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2283 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2284
2285 ibmvnic_send_crq(adapter, &new_crq);
2286}
2287
2288static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2289 struct ibmvnic_adapter *adapter)
2290{
2291 struct net_device *netdev = adapter->netdev;
2292 struct device *dev = &adapter->vdev->dev;
2293 long rc;
2294
2295 rc = crq->change_mac_addr_rsp.rc.code;
2296 if (rc) {
2297 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2298 return;
2299 }
2300 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2301 ETH_ALEN);
2302}
2303
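/* A REQUEST_CAPABILITY response either confirms one of our requested values
 * or, on PARTIALSUCCESS, returns the value the server can support so the
 * request can be retried.  Once all requested capabilities are settled,
 * kick off the IP offload query.
 */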
2304static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2305 struct ibmvnic_adapter *adapter)
2306{
2307 struct device *dev = &adapter->vdev->dev;
2308 u64 *req_value;
2309 char *name;
2310
2311 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2312 case REQ_TX_QUEUES:
2313 req_value = &adapter->req_tx_queues;
2314 name = "tx";
2315 break;
2316 case REQ_RX_QUEUES:
2317 req_value = &adapter->req_rx_queues;
2318 name = "rx";
2319 break;
2320 case REQ_RX_ADD_QUEUES:
2321 req_value = &adapter->req_rx_add_queues;
2322 name = "rx_add";
2323 break;
2324 case REQ_TX_ENTRIES_PER_SUBCRQ:
2325 req_value = &adapter->req_tx_entries_per_subcrq;
2326 name = "tx_entries_per_subcrq";
2327 break;
2328 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2329 req_value = &adapter->req_rx_add_entries_per_subcrq;
2330 name = "rx_add_entries_per_subcrq";
2331 break;
2332 case REQ_MTU:
2333 req_value = &adapter->req_mtu;
2334 name = "mtu";
2335 break;
2336 case PROMISC_REQUESTED:
2337 req_value = &adapter->promisc;
2338 name = "promisc";
2339 break;
2340 default:
2341		dev_err(dev, "Got invalid cap request rsp %d\n",
2342			be16_to_cpu(crq->request_capability.capability));
2343 return;
2344 }
2345
2346 switch (crq->request_capability_rsp.rc.code) {
2347 case SUCCESS:
2348 break;
2349 case PARTIALSUCCESS:
2350 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2351 *req_value,
2352 (long int)be32_to_cpu(crq->request_capability_rsp.
2353 number), name);
2354 release_sub_crqs(adapter);
2355 *req_value = be32_to_cpu(crq->request_capability_rsp.number);
2356 complete(&adapter->init_done);
2357 return;
2358 default:
2359 dev_err(dev, "Error %d in request cap rsp\n",
2360 crq->request_capability_rsp.rc.code);
2361 return;
2362 }
2363
2364 /* Done receiving requested capabilities, query IP offload support */
2365 if (++adapter->requested_caps == 7) {
2366 union ibmvnic_crq newcrq;
2367 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2368 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2369 &adapter->ip_offload_buf;
2370
2371 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2372 buf_sz,
2373 DMA_FROM_DEVICE);
2374
2375 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2376 if (!firmware_has_feature(FW_FEATURE_CMO))
2377 dev_err(dev, "Couldn't map offload buffer\n");
2378 return;
2379 }
2380
2381 memset(&newcrq, 0, sizeof(newcrq));
2382 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2383 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2384 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2385 newcrq.query_ip_offload.ioba =
2386 cpu_to_be32(adapter->ip_offload_tok);
2387
2388 ibmvnic_send_crq(adapter, &newcrq);
2389 }
2390}
2391
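/* Validate the LOGIN response against what we asked for.  A non-zero return
 * code means the server could not honor the requested queue counts and the
 * login must be renegotiated with smaller values.
 */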
2392static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2393 struct ibmvnic_adapter *adapter)
2394{
2395 struct device *dev = &adapter->vdev->dev;
2396 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2397 struct ibmvnic_login_buffer *login = adapter->login_buf;
2398 union ibmvnic_crq crq;
2399 int i;
2400
2401 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2402 DMA_BIDIRECTIONAL);
2403 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2404 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2405
2406	/* If the number of queues requested can't be allocated by the
2407 * server, the login response will return with code 1. We will need
2408 * to resend the login buffer with fewer queues requested.
2409 */
2410 if (login_rsp_crq->generic.rc.code) {
2411 adapter->renegotiate = true;
2412 complete(&adapter->init_done);
2413 return 0;
2414 }
2415
2416	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2417 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2418 netdev_dbg(adapter->netdev, "%016lx\n",
2419 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2420 }
2421
2422 /* Sanity checks */
2423 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2424 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2425 adapter->req_rx_add_queues !=
2426 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2427 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2428 ibmvnic_remove(adapter->vdev);
2429 return -EIO;
2430 }
2431 complete(&adapter->init_done);
2432
2433 memset(&crq, 0, sizeof(crq));
2434 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2435 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2436 ibmvnic_send_crq(adapter, &crq);
2437
2438 return 0;
2439}
2440
2441static void handle_request_map_rsp(union ibmvnic_crq *crq,
2442 struct ibmvnic_adapter *adapter)
2443{
2444 struct device *dev = &adapter->vdev->dev;
2445 u8 map_id = crq->request_map_rsp.map_id;
2446 int tx_subcrqs;
2447 int rx_subcrqs;
2448 long rc;
2449 int i;
2450
2451 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2452 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2453
2454 rc = crq->request_map_rsp.rc.code;
2455 if (rc) {
2456 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2457 adapter->map_id--;
2458 /* need to find and zero tx/rx_pool map_id */
2459 for (i = 0; i < tx_subcrqs; i++) {
2460 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2461 adapter->tx_pool[i].long_term_buff.map_id = 0;
2462 }
2463 for (i = 0; i < rx_subcrqs; i++) {
2464 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2465 adapter->rx_pool[i].long_term_buff.map_id = 0;
2466 }
2467 }
2468 complete(&adapter->fw_done);
2469}
2470
2471static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2472 struct ibmvnic_adapter *adapter)
2473{
2474 struct device *dev = &adapter->vdev->dev;
2475 long rc;
2476
2477 rc = crq->request_unmap_rsp.rc.code;
2478 if (rc)
2479 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2480}
2481
2482static void handle_query_map_rsp(union ibmvnic_crq *crq,
2483 struct ibmvnic_adapter *adapter)
2484{
2485 struct net_device *netdev = adapter->netdev;
2486 struct device *dev = &adapter->vdev->dev;
2487 long rc;
2488
2489 rc = crq->query_map_rsp.rc.code;
2490 if (rc) {
2491 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2492 return;
2493 }
2494 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2495 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2496 crq->query_map_rsp.free_pages);
2497}
2498
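/* Record one QUERY_CAPABILITY response in the adapter structure.  When the
 * last outstanding query completes, init_done is signalled so device
 * initialization can continue.
 */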
2499static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2500 struct ibmvnic_adapter *adapter)
2501{
2502 struct net_device *netdev = adapter->netdev;
2503 struct device *dev = &adapter->vdev->dev;
2504 long rc;
2505
2506 atomic_dec(&adapter->running_cap_queries);
2507 netdev_dbg(netdev, "Outstanding queries: %d\n",
2508 atomic_read(&adapter->running_cap_queries));
2509 rc = crq->query_capability.rc.code;
2510 if (rc) {
2511 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2512 goto out;
2513 }
2514
2515 switch (be16_to_cpu(crq->query_capability.capability)) {
2516 case MIN_TX_QUEUES:
2517 adapter->min_tx_queues =
2518			be64_to_cpu(crq->query_capability.number);
2519		netdev_dbg(netdev, "min_tx_queues = %lld\n",
2520 adapter->min_tx_queues);
2521 break;
2522 case MIN_RX_QUEUES:
2523 adapter->min_rx_queues =
2524			be64_to_cpu(crq->query_capability.number);
2525		netdev_dbg(netdev, "min_rx_queues = %lld\n",
2526 adapter->min_rx_queues);
2527 break;
2528 case MIN_RX_ADD_QUEUES:
2529 adapter->min_rx_add_queues =
2530			be64_to_cpu(crq->query_capability.number);
2531		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2532 adapter->min_rx_add_queues);
2533 break;
2534 case MAX_TX_QUEUES:
2535 adapter->max_tx_queues =
2536			be64_to_cpu(crq->query_capability.number);
2537		netdev_dbg(netdev, "max_tx_queues = %lld\n",
2538 adapter->max_tx_queues);
2539 break;
2540 case MAX_RX_QUEUES:
2541 adapter->max_rx_queues =
2542			be64_to_cpu(crq->query_capability.number);
2543		netdev_dbg(netdev, "max_rx_queues = %lld\n",
2544 adapter->max_rx_queues);
2545 break;
2546 case MAX_RX_ADD_QUEUES:
2547 adapter->max_rx_add_queues =
2548			be64_to_cpu(crq->query_capability.number);
2549		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2550 adapter->max_rx_add_queues);
2551 break;
2552 case MIN_TX_ENTRIES_PER_SUBCRQ:
2553 adapter->min_tx_entries_per_subcrq =
2554			be64_to_cpu(crq->query_capability.number);
2555		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2556 adapter->min_tx_entries_per_subcrq);
2557 break;
2558 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2559 adapter->min_rx_add_entries_per_subcrq =
2560			be64_to_cpu(crq->query_capability.number);
2561		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2562 adapter->min_rx_add_entries_per_subcrq);
2563 break;
2564 case MAX_TX_ENTRIES_PER_SUBCRQ:
2565 adapter->max_tx_entries_per_subcrq =
2566			be64_to_cpu(crq->query_capability.number);
2567		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2568 adapter->max_tx_entries_per_subcrq);
2569 break;
2570 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2571 adapter->max_rx_add_entries_per_subcrq =
2572			be64_to_cpu(crq->query_capability.number);
2573		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2574 adapter->max_rx_add_entries_per_subcrq);
2575 break;
2576 case TCP_IP_OFFLOAD:
2577 adapter->tcp_ip_offload =
2578			be64_to_cpu(crq->query_capability.number);
2579		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2580 adapter->tcp_ip_offload);
2581 break;
2582 case PROMISC_SUPPORTED:
2583 adapter->promisc_supported =
2584			be64_to_cpu(crq->query_capability.number);
2585		netdev_dbg(netdev, "promisc_supported = %lld\n",
2586 adapter->promisc_supported);
2587 break;
2588 case MIN_MTU:
2589		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2590		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2591 break;
2592 case MAX_MTU:
2593		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2594		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2595 break;
2596 case MAX_MULTICAST_FILTERS:
2597 adapter->max_multicast_filters =
2598			be64_to_cpu(crq->query_capability.number);
2599		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2600 adapter->max_multicast_filters);
2601 break;
2602 case VLAN_HEADER_INSERTION:
2603 adapter->vlan_header_insertion =
2604			be64_to_cpu(crq->query_capability.number);
2605		if (adapter->vlan_header_insertion)
2606 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2607 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2608 adapter->vlan_header_insertion);
2609 break;
2610 case MAX_TX_SG_ENTRIES:
2611 adapter->max_tx_sg_entries =
2612			be64_to_cpu(crq->query_capability.number);
2613		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2614 adapter->max_tx_sg_entries);
2615 break;
2616 case RX_SG_SUPPORTED:
2617 adapter->rx_sg_supported =
2618			be64_to_cpu(crq->query_capability.number);
2619		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2620 adapter->rx_sg_supported);
2621 break;
2622 case OPT_TX_COMP_SUB_QUEUES:
2623 adapter->opt_tx_comp_sub_queues =
2624			be64_to_cpu(crq->query_capability.number);
2625		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2626 adapter->opt_tx_comp_sub_queues);
2627 break;
2628 case OPT_RX_COMP_QUEUES:
2629 adapter->opt_rx_comp_queues =
2630			be64_to_cpu(crq->query_capability.number);
2631		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2632 adapter->opt_rx_comp_queues);
2633 break;
2634 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2635 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2636			be64_to_cpu(crq->query_capability.number);
2637		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2638 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2639 break;
2640 case OPT_TX_ENTRIES_PER_SUBCRQ:
2641 adapter->opt_tx_entries_per_subcrq =
2642			be64_to_cpu(crq->query_capability.number);
2643		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2644 adapter->opt_tx_entries_per_subcrq);
2645 break;
2646 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2647 adapter->opt_rxba_entries_per_subcrq =
2648			be64_to_cpu(crq->query_capability.number);
2649		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2650 adapter->opt_rxba_entries_per_subcrq);
2651 break;
2652 case TX_RX_DESC_REQ:
2653 adapter->tx_rx_desc_req = crq->query_capability.number;
2654 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2655 adapter->tx_rx_desc_req);
2656 break;
2657
2658 default:
2659		netdev_err(netdev, "Got invalid cap rsp %d\n",
2660			   be16_to_cpu(crq->query_capability.capability));
2661 }
2662
2663out:
2664 if (atomic_read(&adapter->running_cap_queries) == 0)
2665 complete(&adapter->init_done);
2666 /* We're done querying the capabilities, initialize sub-crqs */
2667}
2668
2669static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2670 struct ibmvnic_adapter *adapter)
2671{
2672 u8 correlator = crq->control_ras_rsp.correlator;
2673 struct device *dev = &adapter->vdev->dev;
2674 bool found = false;
2675 int i;
2676
2677 if (crq->control_ras_rsp.rc.code) {
2678 dev_warn(dev, "Control ras failed rc=%d\n",
2679 crq->control_ras_rsp.rc.code);
2680 return;
2681 }
2682
2683 for (i = 0; i < adapter->ras_comp_num; i++) {
2684 if (adapter->ras_comps[i].correlator == correlator) {
2685 found = true;
2686 break;
2687 }
2688 }
2689
2690 if (!found) {
2691 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2692 return;
2693 }
2694
2695 switch (crq->control_ras_rsp.op) {
2696 case IBMVNIC_TRACE_LEVEL:
2697 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2698 break;
2699 case IBMVNIC_ERROR_LEVEL:
2700 adapter->ras_comps[i].error_check_level =
2701 crq->control_ras.level;
2702 break;
2703 case IBMVNIC_TRACE_PAUSE:
2704 adapter->ras_comp_int[i].paused = 1;
2705 break;
2706 case IBMVNIC_TRACE_RESUME:
2707 adapter->ras_comp_int[i].paused = 0;
2708 break;
2709 case IBMVNIC_TRACE_ON:
2710 adapter->ras_comps[i].trace_on = 1;
2711 break;
2712 case IBMVNIC_TRACE_OFF:
2713 adapter->ras_comps[i].trace_on = 0;
2714 break;
2715 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2716 /* trace_buff_sz is 3 bytes, stuff it into an int */
2717 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2718 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2719 crq->control_ras_rsp.trace_buff_sz[0];
2720 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2721 crq->control_ras_rsp.trace_buff_sz[1];
2722 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2723 crq->control_ras_rsp.trace_buff_sz[2];
2724 break;
2725 default:
2726 dev_err(dev, "invalid op %d on control_ras_rsp",
2727 crq->control_ras_rsp.op);
2728 }
2729}
2730
2731static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
2732{
2733 file->private_data = inode->i_private;
2734 return 0;
2735}
2736
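/* debugfs read handler for a component's firmware trace: allocate a
 * coherent buffer, ask the server to fill it with COLLECT_FW_TRACE, wait
 * for the response, then copy the requested window out to user space.
 */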
2737static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2738 loff_t *ppos)
2739{
2740 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2741 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2742 struct device *dev = &adapter->vdev->dev;
2743 struct ibmvnic_fw_trace_entry *trace;
2744 int num = ras_comp_int->num;
2745 union ibmvnic_crq crq;
2746 dma_addr_t trace_tok;
2747
2748 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2749 return 0;
2750
2751 trace =
2752 dma_alloc_coherent(dev,
2753 be32_to_cpu(adapter->ras_comps[num].
2754 trace_buff_size), &trace_tok,
2755 GFP_KERNEL);
2756 if (!trace) {
2757 dev_err(dev, "Couldn't alloc trace buffer\n");
2758 return 0;
2759 }
2760
2761 memset(&crq, 0, sizeof(crq));
2762 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2763 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2764 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2765 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2766 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2767 ibmvnic_send_crq(adapter, &crq);
2768
2769 init_completion(&adapter->fw_done);
2770 wait_for_completion(&adapter->fw_done);
2771
2772 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2773 len =
2774 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2775 *ppos;
2776
2777 copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2778
2779 dma_free_coherent(dev,
2780 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2781 trace, trace_tok);
2782 *ppos += len;
2783 return len;
2784}
2785
2786static const struct file_operations trace_ops = {
2787 .owner = THIS_MODULE,
2788 .open = ibmvnic_fw_comp_open,
2789 .read = trace_read,
2790};
2791
2792static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2793 loff_t *ppos)
2794{
2795 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2796 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2797 int num = ras_comp_int->num;
2798 char buff[5]; /* 1 or 0 plus \n and \0 */
2799 int size;
2800
2801 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2802
2803 if (*ppos >= size)
2804 return 0;
2805
2806	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2807 *ppos += size;
2808 return size;
2809}
2810
2811static ssize_t paused_write(struct file *file, const char __user *user_buf,
2812 size_t len, loff_t *ppos)
2813{
2814 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2815 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2816 int num = ras_comp_int->num;
2817 union ibmvnic_crq crq;
2818 unsigned long val;
2820
2821	if (kstrtoul_from_user(user_buf, len, 10, &val))
2822		return -EINVAL;
2823
2824 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2825
2826 memset(&crq, 0, sizeof(crq));
2827 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2828 crq.control_ras.cmd = CONTROL_RAS;
2829 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2830 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2831 ibmvnic_send_crq(adapter, &crq);
2832
2833 return len;
2834}
2835
2836static const struct file_operations paused_ops = {
2837 .owner = THIS_MODULE,
2838 .open = ibmvnic_fw_comp_open,
2839 .read = paused_read,
2840 .write = paused_write,
2841};
2842
2843static ssize_t tracing_read(struct file *file, char __user *user_buf,
2844 size_t len, loff_t *ppos)
2845{
2846 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2847 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2848 int num = ras_comp_int->num;
2849 char buff[5]; /* 1 or 0 plus \n and \0 */
2850 int size;
2851
2852 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2853
2854 if (*ppos >= size)
2855 return 0;
2856
2857	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2858 *ppos += size;
2859 return size;
2860}
2861
2862static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2863 size_t len, loff_t *ppos)
2864{
2865 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2866 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2867 int num = ras_comp_int->num;
2868 union ibmvnic_crq crq;
2869 unsigned long val;
2871
2872	if (kstrtoul_from_user(user_buf, len, 10, &val))
2873		return -EINVAL;
2874
2875 memset(&crq, 0, sizeof(crq));
2876 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2877 crq.control_ras.cmd = CONTROL_RAS;
2878 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2879 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);
2880
2881	return len;
2882}
2883
2884static const struct file_operations tracing_ops = {
2885 .owner = THIS_MODULE,
2886 .open = ibmvnic_fw_comp_open,
2887 .read = tracing_read,
2888 .write = tracing_write,
2889};
2890
2891static ssize_t error_level_read(struct file *file, char __user *user_buf,
2892 size_t len, loff_t *ppos)
2893{
2894 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2895 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2896 int num = ras_comp_int->num;
2897 char buff[5]; /* decimal max char plus \n and \0 */
2898 int size;
2899
2900 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2901
2902 if (*ppos >= size)
2903 return 0;
2904
2905	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2906 *ppos += size;
2907 return size;
2908}
2909
2910static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2911 size_t len, loff_t *ppos)
2912{
2913 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2914 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2915 int num = ras_comp_int->num;
2916 union ibmvnic_crq crq;
2917 unsigned long val;
2919
2920	if (kstrtoul_from_user(user_buf, len, 10, &val))
2921		return -EINVAL;
2922
2923 if (val > 9)
2924 val = 9;
2925
2926 memset(&crq, 0, sizeof(crq));
2927 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2928 crq.control_ras.cmd = CONTROL_RAS;
2929 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2930 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2931 crq.control_ras.level = val;
2932 ibmvnic_send_crq(adapter, &crq);
2933
2934 return len;
2935}
2936
2937static const struct file_operations error_level_ops = {
2938 .owner = THIS_MODULE,
2939 .open = ibmvnic_fw_comp_open,
2940 .read = error_level_read,
2941 .write = error_level_write,
2942};
2943
2944static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2945 size_t len, loff_t *ppos)
2946{
2947 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2948 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2949 int num = ras_comp_int->num;
2950 char buff[5]; /* decimal max char plus \n and \0 */
2951 int size;
2952
2953 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2954 if (*ppos >= size)
2955 return 0;
2956
2957	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2958 *ppos += size;
2959 return size;
2960}
2961
2962static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
2963 size_t len, loff_t *ppos)
2964{
2965 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2966 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2967 union ibmvnic_crq crq;
2968 unsigned long val;
2970
2971	if (kstrtoul_from_user(user_buf, len, 10, &val))
2972		return -EINVAL;
2973 if (val > 9)
2974 val = 9;
2975
2976 memset(&crq, 0, sizeof(crq));
2977 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2978 crq.control_ras.cmd = CONTROL_RAS;
2979 crq.control_ras.correlator =
2980 adapter->ras_comps[ras_comp_int->num].correlator;
2981 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
2982 crq.control_ras.level = val;
2983 ibmvnic_send_crq(adapter, &crq);
2984
2985 return len;
2986}
2987
2988static const struct file_operations trace_level_ops = {
2989 .owner = THIS_MODULE,
2990 .open = ibmvnic_fw_comp_open,
2991 .read = trace_level_read,
2992 .write = trace_level_write,
2993};
2994
2995static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
2996 size_t len, loff_t *ppos)
2997{
2998 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2999 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3000 int num = ras_comp_int->num;
3001 char buff[9]; /* decimal max int plus \n and \0 */
3002 int size;
3003
3004 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
3005 if (*ppos >= size)
3006 return 0;
3007
3008	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
3009 *ppos += size;
3010 return size;
3011}
3012
3013static ssize_t trace_buff_size_write(struct file *file,
3014 const char __user *user_buf, size_t len,
3015 loff_t *ppos)
3016{
3017 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3018 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3019 union ibmvnic_crq crq;
3020 unsigned long val;
3022
3023	if (kstrtoul_from_user(user_buf, len, 10, &val))
3024		return -EINVAL;
3025
3026 memset(&crq, 0, sizeof(crq));
3027 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3028 crq.control_ras.cmd = CONTROL_RAS;
3029 crq.control_ras.correlator =
3030 adapter->ras_comps[ras_comp_int->num].correlator;
3031 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3032 /* trace_buff_sz is 3 bytes, stuff an int into it */
3033 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3034 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3035 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3036 ibmvnic_send_crq(adapter, &crq);
3037
3038 return len;
3039}
3040
3041static const struct file_operations trace_size_ops = {
3042 .owner = THIS_MODULE,
3043 .open = ibmvnic_fw_comp_open,
3044 .read = trace_buff_size_read,
3045 .write = trace_buff_size_write,
3046};
3047
3048static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3049 struct ibmvnic_adapter *adapter)
3050{
3051 struct device *dev = &adapter->vdev->dev;
3052 struct dentry *dir_ent;
3053 struct dentry *ent;
3054 int i;
3055
3056 debugfs_remove_recursive(adapter->ras_comps_ent);
3057
3058 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3059 adapter->debugfs_dir);
3060 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3061 dev_info(dev, "debugfs create ras_comps dir failed\n");
3062 return;
3063 }
3064
3065 for (i = 0; i < adapter->ras_comp_num; i++) {
3066 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3067 adapter->ras_comps_ent);
3068 if (!dir_ent || IS_ERR(dir_ent)) {
3069 dev_info(dev, "debugfs create %s dir failed\n",
3070 adapter->ras_comps[i].name);
3071 continue;
3072 }
3073
3074 adapter->ras_comp_int[i].adapter = adapter;
3075 adapter->ras_comp_int[i].num = i;
3076 adapter->ras_comp_int[i].desc_blob.data =
3077 &adapter->ras_comps[i].description;
3078 adapter->ras_comp_int[i].desc_blob.size =
3079 sizeof(adapter->ras_comps[i].description);
3080
3081 /* Don't need to remember the dentry's because the debugfs dir
3082 * gets removed recursively
3083 */
3084 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3085 &adapter->ras_comp_int[i].desc_blob);
3086 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3087 dir_ent, &adapter->ras_comp_int[i],
3088 &trace_size_ops);
3089 ent = debugfs_create_file("trace_level",
3090 S_IRUGO |
3091 (adapter->ras_comps[i].trace_level !=
3092 0xFF ? S_IWUSR : 0),
3093 dir_ent, &adapter->ras_comp_int[i],
3094 &trace_level_ops);
3095 ent = debugfs_create_file("error_level",
3096 S_IRUGO |
3097 (adapter->
3098 ras_comps[i].error_check_level !=
3099 0xFF ? S_IWUSR : 0),
3100 dir_ent, &adapter->ras_comp_int[i],
3101					  &error_level_ops);
3102 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3103 dir_ent, &adapter->ras_comp_int[i],
3104 &tracing_ops);
3105 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3106 dir_ent, &adapter->ras_comp_int[i],
3107 &paused_ops);
3108 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3109 &adapter->ras_comp_int[i],
3110 &trace_ops);
3111 }
3112}
3113
3114static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3115 struct ibmvnic_adapter *adapter)
3116{
3117 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3118 struct device *dev = &adapter->vdev->dev;
3119 union ibmvnic_crq newcrq;
3120
3121 adapter->ras_comps = dma_alloc_coherent(dev, len,
3122 &adapter->ras_comps_tok,
3123 GFP_KERNEL);
3124 if (!adapter->ras_comps) {
3125 if (!firmware_has_feature(FW_FEATURE_CMO))
3126 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3127 return;
3128 }
3129
3130 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3131 sizeof(struct ibmvnic_fw_comp_internal),
3132 GFP_KERNEL);
3133 if (!adapter->ras_comp_int)
3134 dma_free_coherent(dev, len, adapter->ras_comps,
3135 adapter->ras_comps_tok);
3136
3137 memset(&newcrq, 0, sizeof(newcrq));
3138 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3139 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3140 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3141 newcrq.request_ras_comps.len = cpu_to_be32(len);
3142 ibmvnic_send_crq(adapter, &newcrq);
3143}
3144
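/* Clean up commands that were still in flight when the adapter was reset or
 * migrated: unmap and free their buffers and drop them from the in-flight
 * list so nothing waits on a response that will never come.
 */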
3145static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3146{
3147	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3148	struct device *dev = &adapter->vdev->dev;
3149	struct ibmvnic_error_buff *error_buff, *tmp2;
3150	unsigned long flags;
3151 unsigned long flags2;
3152
3153 spin_lock_irqsave(&adapter->inflight_lock, flags);
3154	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3155		switch (inflight_cmd->crq.generic.cmd) {
3156 case LOGIN:
3157 dma_unmap_single(dev, adapter->login_buf_token,
3158 adapter->login_buf_sz,
3159 DMA_BIDIRECTIONAL);
3160 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3161 adapter->login_rsp_buf_sz,
3162 DMA_BIDIRECTIONAL);
3163 kfree(adapter->login_rsp_buf);
3164 kfree(adapter->login_buf);
3165 break;
3166 case REQUEST_DUMP:
3167 complete(&adapter->fw_done);
3168 break;
3169 case REQUEST_ERROR_INFO:
3170 spin_lock_irqsave(&adapter->error_list_lock, flags2);
3171			list_for_each_entry_safe(error_buff, tmp2,
3172						 &adapter->errors, list) {
3173				dma_unmap_single(dev, error_buff->dma,
3174 error_buff->len,
3175 DMA_FROM_DEVICE);
3176 kfree(error_buff->buff);
3177 list_del(&error_buff->list);
3178 kfree(error_buff);
3179 }
3180 spin_unlock_irqrestore(&adapter->error_list_lock,
3181 flags2);
3182 break;
3183 }
3184 list_del(&inflight_cmd->list);
3185 kfree(inflight_cmd);
3186 }
3187 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3188}
3189
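/* Main CRQ dispatcher.  Initialization and transport events are handled
 * here directly; command responses are routed to their specific handlers
 * based on gen_crq->cmd.
 */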
3190static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3191 struct ibmvnic_adapter *adapter)
3192{
3193 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3194 struct net_device *netdev = adapter->netdev;
3195 struct device *dev = &adapter->vdev->dev;
3196 long rc;
3197
3198 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3199 ((unsigned long int *)crq)[0],
3200 ((unsigned long int *)crq)[1]);
3201 switch (gen_crq->first) {
3202 case IBMVNIC_CRQ_INIT_RSP:
3203 switch (gen_crq->cmd) {
3204 case IBMVNIC_CRQ_INIT:
3205 dev_info(dev, "Partner initialized\n");
3206 /* Send back a response */
3207 rc = ibmvnic_send_crq_init_complete(adapter);
3208 if (rc == 0)
3209 send_version_xchg(adapter);
3210 else
3211 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3212 break;
3213 case IBMVNIC_CRQ_INIT_COMPLETE:
3214 dev_info(dev, "Partner initialization complete\n");
3215 send_version_xchg(adapter);
3216 break;
3217 default:
3218 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3219 }
3220 return;
3221 case IBMVNIC_CRQ_XPORT_EVENT:
3222 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3223 dev_info(dev, "Re-enabling adapter\n");
3224 adapter->migrated = true;
3225 ibmvnic_free_inflight(adapter);
3226 release_sub_crqs(adapter);
3227 rc = ibmvnic_reenable_crq_queue(adapter);
3228 if (rc)
3229 dev_err(dev, "Error after enable rc=%ld\n", rc);
3230 adapter->migrated = false;
3231 rc = ibmvnic_send_crq_init(adapter);
3232 if (rc)
3233 dev_err(dev, "Error sending init rc=%ld\n", rc);
3234 } else {
3235 /* The adapter lost the connection */
3236 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3237 gen_crq->cmd);
3238 ibmvnic_free_inflight(adapter);
3239 release_sub_crqs(adapter);
3240 }
3241 return;
3242 case IBMVNIC_CRQ_CMD_RSP:
3243 break;
3244 default:
3245 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3246 gen_crq->first);
3247 return;
3248 }
3249
3250 switch (gen_crq->cmd) {
3251 case VERSION_EXCHANGE_RSP:
3252 rc = crq->version_exchange_rsp.rc.code;
3253 if (rc) {
3254 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3255 break;
3256 }
3257 dev_info(dev, "Partner protocol version is %d\n",
3258 crq->version_exchange_rsp.version);
3259 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3260 ibmvnic_version)
3261 ibmvnic_version =
3262 be16_to_cpu(crq->version_exchange_rsp.version);
3263 send_cap_queries(adapter);
3264 break;
3265 case QUERY_CAPABILITY_RSP:
3266 handle_query_cap_rsp(crq, adapter);
3267 break;
3268 case QUERY_MAP_RSP:
3269 handle_query_map_rsp(crq, adapter);
3270 break;
3271 case REQUEST_MAP_RSP:
3272 handle_request_map_rsp(crq, adapter);
3273 break;
3274 case REQUEST_UNMAP_RSP:
3275 handle_request_unmap_rsp(crq, adapter);
3276 break;
3277 case REQUEST_CAPABILITY_RSP:
3278 handle_request_cap_rsp(crq, adapter);
3279 break;
3280 case LOGIN_RSP:
3281 netdev_dbg(netdev, "Got Login Response\n");
3282 handle_login_rsp(crq, adapter);
3283 break;
3284 case LOGICAL_LINK_STATE_RSP:
3285 netdev_dbg(netdev, "Got Logical Link State Response\n");
3286 adapter->logical_link_state =
3287 crq->logical_link_state_rsp.link_state;
3288 break;
3289 case LINK_STATE_INDICATION:
3290 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3291 adapter->phys_link_state =
3292 crq->link_state_indication.phys_link_state;
3293 adapter->logical_link_state =
3294 crq->link_state_indication.logical_link_state;
3295 break;
3296 case CHANGE_MAC_ADDR_RSP:
3297 netdev_dbg(netdev, "Got MAC address change Response\n");
3298 handle_change_mac_rsp(crq, adapter);
3299 break;
3300 case ERROR_INDICATION:
3301 netdev_dbg(netdev, "Got Error Indication\n");
3302 handle_error_indication(crq, adapter);
3303 break;
3304 case REQUEST_ERROR_RSP:
3305 netdev_dbg(netdev, "Got Error Detail Response\n");
3306 handle_error_info_rsp(crq, adapter);
3307 break;
3308 case REQUEST_STATISTICS_RSP:
3309 netdev_dbg(netdev, "Got Statistics Response\n");
3310 complete(&adapter->stats_done);
3311 break;
3312 case REQUEST_DUMP_SIZE_RSP:
3313 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3314 handle_dump_size_rsp(crq, adapter);
3315 break;
3316 case REQUEST_DUMP_RSP:
3317 netdev_dbg(netdev, "Got Request Dump Response\n");
3318 complete(&adapter->fw_done);
3319 break;
3320 case QUERY_IP_OFFLOAD_RSP:
3321 netdev_dbg(netdev, "Got Query IP offload Response\n");
3322 handle_query_ip_offload_rsp(adapter);
3323 break;
3324 case MULTICAST_CTRL_RSP:
3325 netdev_dbg(netdev, "Got multicast control Response\n");
3326 break;
3327 case CONTROL_IP_OFFLOAD_RSP:
3328 netdev_dbg(netdev, "Got Control IP offload Response\n");
3329 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3330 sizeof(adapter->ip_offload_ctrl),
3331 DMA_TO_DEVICE);
3332 /* We're done with the queries, perform the login */
3333 send_login(adapter);
3334 break;
3335 case REQUEST_RAS_COMP_NUM_RSP:
3336 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3337 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3338 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3339 break;
3340 }
3341 adapter->ras_comp_num =
3342 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3343 handle_request_ras_comp_num_rsp(crq, adapter);
3344 break;
3345 case REQUEST_RAS_COMPS_RSP:
3346 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3347 handle_request_ras_comps_rsp(crq, adapter);
3348 break;
3349 case CONTROL_RAS_RSP:
3350 netdev_dbg(netdev, "Got Control RAS Response\n");
3351 handle_control_ras_rsp(crq, adapter);
3352 break;
3353 case COLLECT_FW_TRACE_RSP:
3354 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3355 complete(&adapter->fw_done);
3356 break;
3357 default:
3358 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3359 gen_crq->cmd);
3360 }
3361}
3362
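/* CRQ interrupt handler.  All valid messages are drained with VIO
 * interrupts disabled, interrupts are re-enabled, and the queue is checked
 * once more so a message that arrived just before re-enabling is not lost.
 */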
3363static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3364{
3365 struct ibmvnic_adapter *adapter = instance;
3366 struct ibmvnic_crq_queue *queue = &adapter->crq;
3367 struct vio_dev *vdev = adapter->vdev;
3368 union ibmvnic_crq *crq;
3369 unsigned long flags;
3370 bool done = false;
3371
3372 spin_lock_irqsave(&queue->lock, flags);
3373 vio_disable_interrupts(vdev);
3374 while (!done) {
3375 /* Pull all the valid messages off the CRQ */
3376 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3377 ibmvnic_handle_crq(crq, adapter);
3378 crq->generic.first = 0;
3379 }
3380 vio_enable_interrupts(vdev);
3381 crq = ibmvnic_next_crq(adapter);
3382 if (crq) {
3383 vio_disable_interrupts(vdev);
3384 ibmvnic_handle_crq(crq, adapter);
3385 crq->generic.first = 0;
3386 } else {
3387 done = true;
3388 }
3389 }
3390 spin_unlock_irqrestore(&queue->lock, flags);
3391 return IRQ_HANDLED;
3392}
3393
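/* Ask the hypervisor to re-enable the main CRQ (H_ENABLE_CRQ), retrying
 * while the call reports busy; used after the partition has migrated.
 */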
3394static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3395{
3396 struct vio_dev *vdev = adapter->vdev;
3397 int rc;
3398
3399 do {
3400 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3401 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3402
3403 if (rc)
3404 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3405
3406 return rc;
3407}
3408
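/* Reset the main CRQ: free it with H_FREE_CRQ, clear the message page,
 * and register it again with H_REG_CRQ.  H_CLOSED from the re-registration
 * only means the partner adapter is not ready yet.
 */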
3409static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3410{
3411 struct ibmvnic_crq_queue *crq = &adapter->crq;
3412 struct device *dev = &adapter->vdev->dev;
3413 struct vio_dev *vdev = adapter->vdev;
3414 int rc;
3415
3416 /* Close the CRQ */
3417 do {
3418 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3419 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3420
3421 /* Clean out the queue */
3422 memset(crq->msgs, 0, PAGE_SIZE);
3423 crq->cur = 0;
3424
3425 /* And re-open it again */
3426 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3427 crq->msg_token, PAGE_SIZE);
3428
3429 if (rc == H_CLOSED)
3430 /* Adapter is good, but other end is not ready */
3431 dev_warn(dev, "Partner adapter not ready\n");
3432 else if (rc != 0)
3433 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3434
3435 return rc;
3436}
3437
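/* Tear down the main CRQ: release the IRQ, free the queue with H_FREE_CRQ,
 * unmap the DMA buffer and free the message page.
 */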
3438static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3439{
3440 struct ibmvnic_crq_queue *crq = &adapter->crq;
3441 struct vio_dev *vdev = adapter->vdev;
3442 long rc;
3443
3444 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3445 free_irq(vdev->irq, adapter);
3446 do {
3447 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3448 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3449
3450 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3451 DMA_BIDIRECTIONAL);
3452 free_page((unsigned long)crq->msgs);
3453}
3454
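/* Set up the main CRQ: a zeroed page of messages is DMA-mapped and
 * registered with the hypervisor via H_REG_CRQ, then the CRQ interrupt is
 * requested and enabled.  H_RESOURCE is treated as a possibly stale
 * registration (e.g. after kexec) and answered with a CRQ reset.
 */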
3455static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3456{
3457 struct ibmvnic_crq_queue *crq = &adapter->crq;
3458 struct device *dev = &adapter->vdev->dev;
3459 struct vio_dev *vdev = adapter->vdev;
3460 int rc, retrc = -ENOMEM;
3461
3462 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3463 /* Should we allocate more than one page? */
3464
3465 if (!crq->msgs)
3466 return -ENOMEM;
3467
3468 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3469 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3470 DMA_BIDIRECTIONAL);
3471 if (dma_mapping_error(dev, crq->msg_token))
3472 goto map_failed;
3473
3474 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3475 crq->msg_token, PAGE_SIZE);
3476
3477 if (rc == H_RESOURCE)
3478 /* maybe kexecing and resource is busy. try a reset */
3479 rc = ibmvnic_reset_crq(adapter);
3480 retrc = rc;
3481
3482 if (rc == H_CLOSED) {
3483 dev_warn(dev, "Partner adapter not ready\n");
3484 } else if (rc) {
3485 dev_warn(dev, "Error %d opening adapter\n", rc);
3486 goto reg_crq_failed;
3487 }
3488
3489 retrc = 0;
3490
3491 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3492 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3493 adapter);
3494 if (rc) {
3495 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3496 vdev->irq, rc);
3497 goto req_irq_failed;
3498 }
3499
3500 rc = vio_enable_interrupts(vdev);
3501 if (rc) {
3502 dev_err(dev, "Error %d enabling interrupts\n", rc);
3503 goto req_irq_failed;
3504 }
3505
3506 crq->cur = 0;
3507 spin_lock_init(&crq->lock);
3508
3509 return retrc;
3510
3511req_irq_failed:
3512 do {
3513 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3514 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3515reg_crq_failed:
3516 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3517map_failed:
3518 free_page((unsigned long)crq->msgs);
3519 return retrc;
3520}
3521
3522/* debugfs interface for requesting and reading a firmware dump */
3523static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3524{
3525 struct net_device *netdev = seq->private;
3526 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3527 struct device *dev = &adapter->vdev->dev;
3528 union ibmvnic_crq crq;
3529
3530 memset(&crq, 0, sizeof(crq));
3531 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3532 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3533	/* init the completion before sending so the response can't race it */
3534	init_completion(&adapter->fw_done);
3535	ibmvnic_send_crq(adapter, &crq);
3536	wait_for_completion(&adapter->fw_done);
3537
3538 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3539
3540 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3541 DMA_BIDIRECTIONAL);
3542
3543 kfree(adapter->dump_data);
3544
3545 return 0;
3546}
3547
3548static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3549{
3550 return single_open(file, ibmvnic_dump_show, inode->i_private);
3551}
3552
3553static const struct file_operations ibmvnic_dump_ops = {
3554 .owner = THIS_MODULE,
3555 .open = ibmvnic_dump_open,
3556 .read = seq_read,
3557 .llseek = seq_lseek,
3558 .release = single_release,
3559};
3560
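/* Probe: fetch the MAC address from the VETH_MAC_ADDR attribute, allocate
 * the netdev, bring up the main CRQ and negotiate capabilities with the
 * VNIC server, retrying while renegotiation is requested or the sub-CRQ
 * setup is only partially successful, then register the netdev.
 */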
3561static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3562{
3563 struct ibmvnic_adapter *adapter;
3564 struct net_device *netdev;
3565 unsigned char *mac_addr_p;
3566 struct dentry *ent;
3567 char buf[16]; /* debugfs name buf */
3568 int rc;
3569
3570 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3571 dev->unit_address);
3572
3573 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3574 VETH_MAC_ADDR, NULL);
3575 if (!mac_addr_p) {
3576 dev_err(&dev->dev,
3577 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3578 __FILE__, __LINE__);
3579 return 0;
3580 }
3581
3582 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3583 IBMVNIC_MAX_TX_QUEUES);
3584 if (!netdev)
3585 return -ENOMEM;
3586
3587 adapter = netdev_priv(netdev);
3588 dev_set_drvdata(&dev->dev, netdev);
3589 adapter->vdev = dev;
3590 adapter->netdev = netdev;
3591
3592 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3593 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3594 netdev->irq = dev->irq;
3595 netdev->netdev_ops = &ibmvnic_netdev_ops;
3596 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3597 SET_NETDEV_DEV(netdev, &dev->dev);
3598
3599 spin_lock_init(&adapter->stats_lock);
3600
3601 rc = ibmvnic_init_crq_queue(adapter);
3602 if (rc) {
3603 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3604 goto free_netdev;
3605 }
3606
3607 INIT_LIST_HEAD(&adapter->errors);
3608 INIT_LIST_HEAD(&adapter->inflight);
3609 spin_lock_init(&adapter->error_list_lock);
3610 spin_lock_init(&adapter->inflight_lock);
3611
3612 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3613 sizeof(struct ibmvnic_statistics),
3614 DMA_FROM_DEVICE);
3615 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3616 if (!firmware_has_feature(FW_FEATURE_CMO))
3617 dev_err(&dev->dev, "Couldn't map stats buffer\n");
3618 goto free_crq;
3619 }
3620
3621 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3622 ent = debugfs_create_dir(buf, NULL);
3623 if (!ent || IS_ERR(ent)) {
3624 dev_info(&dev->dev, "debugfs create directory failed\n");
3625 adapter->debugfs_dir = NULL;
3626 } else {
3627 adapter->debugfs_dir = ent;
3628 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3629 netdev, &ibmvnic_dump_ops);
3630 if (!ent || IS_ERR(ent)) {
3631 dev_info(&dev->dev,
3632 "debugfs create dump file failed\n");
3633 adapter->debugfs_dump = NULL;
3634 } else {
3635 adapter->debugfs_dump = ent;
3636 }
3637 }
3638	/* init the completion before the CRQ init request goes out */
3639	init_completion(&adapter->init_done);
3640	ibmvnic_send_crq_init(adapter);
3641	wait_for_completion(&adapter->init_done);
3642
3643	do {
3644		adapter->renegotiate = false;
3645
3646		init_sub_crqs(adapter, 0);
3647 reinit_completion(&adapter->init_done);
3648 wait_for_completion(&adapter->init_done);
3649
3650 if (adapter->renegotiate) {
3651 release_sub_crqs(adapter);
3652 send_cap_queries(adapter);
3653
3654 reinit_completion(&adapter->init_done);
3655 wait_for_completion(&adapter->init_done);
3656 }
3657 } while (adapter->renegotiate);
3658
3659 /* if init_sub_crqs is partially successful, retry */
3660 while (!adapter->tx_scrq || !adapter->rx_scrq) {
3661 init_sub_crqs(adapter, 1);
3662
3663 reinit_completion(&adapter->init_done);
3664 wait_for_completion(&adapter->init_done);
3665 }
3666
3667 netdev->real_num_tx_queues = adapter->req_tx_queues;
3668
3669 rc = register_netdev(netdev);
3670 if (rc) {
3671 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3672 goto free_debugfs;
3673 }
3674 dev_info(&dev->dev, "ibmvnic registered\n");
3675
3676 return 0;
3677
3678free_debugfs:
3679 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3680 debugfs_remove_recursive(adapter->debugfs_dir);
3681free_crq:
3682 ibmvnic_release_crq_queue(adapter);
3683free_netdev:
3684 free_netdev(netdev);
3685 return rc;
3686}
3687
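/* Remove: unregister the netdev and release the sub-CRQs, the main CRQ,
 * the debugfs entries and any RAS component buffers.
 */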
3688static int ibmvnic_remove(struct vio_dev *dev)
3689{
3690 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3691 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3692
3693 unregister_netdev(netdev);
3694
3695 release_sub_crqs(adapter);
3696
3697 ibmvnic_release_crq_queue(adapter);
3698
3699 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3700 debugfs_remove_recursive(adapter->debugfs_dir);
3701
3702 if (adapter->ras_comps)
3703 dma_free_coherent(&dev->dev,
3704 adapter->ras_comp_num *
3705 sizeof(struct ibmvnic_fw_component),
3706 adapter->ras_comps, adapter->ras_comps_tok);
3707
3708 kfree(adapter->ras_comp_int);
3709
3710 free_netdev(netdev);
3711 dev_set_drvdata(&dev->dev, NULL);
3712
3713 return 0;
3714}
3715
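/* Estimate the IO entitlement (DMA mapping space) this device wants:
 * the CRQ page, the bounce buffer, the statistics buffer, the sub-CRQ
 * message queues and the long-term-mapped rx pool buffers.
 */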
3716static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3717{
3718 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3719 struct ibmvnic_adapter *adapter;
3720 struct iommu_table *tbl;
3721 unsigned long ret = 0;
3722 int i;
3723
3724 tbl = get_iommu_table_base(&vdev->dev);
3725
3726	/* netdev inits at probe time along with the structures we need below */
3727 if (!netdev)
3728 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3729
3730 adapter = netdev_priv(netdev);
3731
3732 ret += PAGE_SIZE; /* the crq message queue */
3733 ret += adapter->bounce_buffer_size;
3734 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3735
3736 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3737 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3738
3739 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3740 i++)
3741 ret += adapter->rx_pool[i].size *
3742 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3743
3744 return ret;
3745}
3746
3747static int ibmvnic_resume(struct device *dev)
3748{
3749 struct net_device *netdev = dev_get_drvdata(dev);
3750 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3751 int i;
3752
3753 /* kick the interrupt handlers just in case we lost an interrupt */
3754 for (i = 0; i < adapter->req_rx_queues; i++)
3755 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3756 adapter->rx_scrq[i]);
3757
3758 return 0;
3759}
3760
3761static struct vio_device_id ibmvnic_device_table[] = {
3762 {"network", "IBM,vnic"},
3763 {"", "" }
3764};
3765MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3766
3767static const struct dev_pm_ops ibmvnic_pm_ops = {
3768 .resume = ibmvnic_resume
3769};
3770
3771static struct vio_driver ibmvnic_driver = {
3772 .id_table = ibmvnic_device_table,
3773 .probe = ibmvnic_probe,
3774 .remove = ibmvnic_remove,
3775 .get_desired_dma = ibmvnic_get_desired_dma,
3776 .name = ibmvnic_driver_name,
3777 .pm = &ibmvnic_pm_ops,
3778};
3779
3780/* module functions */
3781static int __init ibmvnic_module_init(void)
3782{
3783 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3784 IBMVNIC_DRIVER_VERSION);
3785
3786 return vio_register_driver(&ibmvnic_driver);
3787}
3788
3789static void __exit ibmvnic_module_exit(void)
3790{
3791 vio_unregister_driver(&ibmvnic_driver);
3792}
3793
3794module_init(ibmvnic_module_init);
3795module_exit(ibmvnic_module_exit);