1/**************************************************************************/
2/* */
3/* IBM System i and System p Virtual NIC Device Driver */
4/* Copyright (C) 2014 IBM Corp. */
5/* Santiago Leon (santi_leon@yahoo.com) */
6/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7/* John Allen (jallen@linux.vnet.ibm.com) */
8/* */
9/* This program is free software; you can redistribute it and/or modify */
10/* it under the terms of the GNU General Public License as published by */
11/* the Free Software Foundation; either version 2 of the License, or */
12/* (at your option) any later version. */
13/* */
14/* This program is distributed in the hope that it will be useful, */
15/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17/* GNU General Public License for more details. */
18/* */
19/* You should have received a copy of the GNU General Public License */
20/* along with this program. */
21/* */
22/* This module contains the implementation of a virtual ethernet device */
23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24/* option of the RS/6000 Platform Architecture to interface with virtual */
25/* ethernet NICs that are presented to the partition by the hypervisor. */
26/* */
27/* Messages are passed between the VNIC driver and the VNIC server using */
28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29/* issue and receive commands that initiate communication with the server */
30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31/* are used by the driver to notify the server that a packet is */
32/* ready for transmission or that a buffer has been added to receive a */
33/* packet. Subsequently, sCRQs are used by the server to notify the */
34/* driver that a packet transmission has been completed or that a packet */
35/* has been received and placed in a waiting buffer. */
36/* */
37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38/* which skbs are DMA mapped and immediately unmapped when the transmit */
39/* or receive has been completed, the VNIC driver is required to use */
40/* "long term mapping". This entails that large, continuous DMA mapped */
41/* buffers are allocated on driver initialization and these buffers are */
42/* then continuously reused to pass skbs to and from the VNIC server. */
43/* */
44/**************************************************************************/
45
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/types.h>
49#include <linux/errno.h>
50#include <linux/completion.h>
51#include <linux/ioport.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/init.h>
58#include <linux/delay.h>
59#include <linux/mm.h>
60#include <linux/ethtool.h>
61#include <linux/proc_fs.h>
62#include <linux/in.h>
63#include <linux/ip.h>
64#include <linux/ipv6.h>
65#include <linux/irq.h>
66#include <linux/kthread.h>
67#include <linux/seq_file.h>
68#include <linux/debugfs.h>
69#include <linux/interrupt.h>
70#include <net/net_namespace.h>
71#include <asm/hvcall.h>
72#include <linux/atomic.h>
73#include <asm/vio.h>
74#include <asm/iommu.h>
75#include <linux/uaccess.h>
76#include <asm/firmware.h>
77#include <linux/workqueue.h>
78
79#include "ibmvnic.h"
80
81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88
89static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90static int ibmvnic_remove(struct vio_dev *);
91static void release_sub_crqs(struct ibmvnic_adapter *);
92static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
93static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
94static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
95static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
96static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
97static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
98 union sub_crq *sub_crq);
99static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
100static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
101static int enable_scrq_irq(struct ibmvnic_adapter *,
102 struct ibmvnic_sub_crq_queue *);
103static int disable_scrq_irq(struct ibmvnic_adapter *,
104 struct ibmvnic_sub_crq_queue *);
105static int pending_scrq(struct ibmvnic_adapter *,
106 struct ibmvnic_sub_crq_queue *);
107static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
108 struct ibmvnic_sub_crq_queue *);
109static int ibmvnic_poll(struct napi_struct *napi, int budget);
110static void send_map_query(struct ibmvnic_adapter *adapter);
111static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
112static void send_request_unmap(struct ibmvnic_adapter *, u8);
113static void send_login(struct ibmvnic_adapter *adapter);
114static void send_cap_queries(struct ibmvnic_adapter *adapter);
115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
116
117struct ibmvnic_stat {
118 char name[ETH_GSTRING_LEN];
119 int offset;
120};
121
122#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
123 offsetof(struct ibmvnic_statistics, stat))
124#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
125
126static const struct ibmvnic_stat ibmvnic_stats[] = {
127 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
128 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
129 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
130 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
131 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
132 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
133 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
134 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
135 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
136 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
137 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
138 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
139 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
140 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
141 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
142 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
143 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
144 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
145 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
146 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
147 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
148 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
149};
150
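/* Wrapper around the H_REG_SUB_CRQ hcall: registers a sub-CRQ page with the
 * hypervisor and returns the assigned queue number and interrupt source
 * through the output parameters.
 */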
151static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
152 unsigned long length, unsigned long *number,
153 unsigned long *irq)
154{
155 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
156 long rc;
157
158 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
159 *number = retbuf[0];
160 *irq = retbuf[1];
161
162 return rc;
163}
164
165/* net_device_ops functions */
166
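/* Record the basic parameters (size, index, buffer size, active flag) for an
 * rx pool; the backing buffers are allocated later by alloc_rx_pool().
 */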
167static void init_rx_pool(struct ibmvnic_adapter *adapter,
168 struct ibmvnic_rx_pool *rx_pool, int num, int index,
169 int buff_size, int active)
170{
171 netdev_dbg(adapter->netdev,
172 "Initializing rx_pool %d, %d buffs, %d bytes each\n",
173 index, num, buff_size);
174 rx_pool->size = num;
175 rx_pool->index = index;
176 rx_pool->buff_size = buff_size;
177 rx_pool->active = active;
178}
179
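/* Allocate a coherent DMA buffer for long term mapping, assign it a map id,
 * and ask the VNIC server to map it via send_request_map(), waiting on
 * fw_done for the response.
 */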
180static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
181 struct ibmvnic_long_term_buff *ltb, int size)
182{
183 struct device *dev = &adapter->vdev->dev;
184
185 ltb->size = size;
186 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
187 GFP_KERNEL);
188
189 if (!ltb->buff) {
190 dev_err(dev, "Couldn't alloc long term buffer\n");
191 return -ENOMEM;
192 }
193 ltb->map_id = adapter->map_id;
194 adapter->map_id++;
195
196 init_completion(&adapter->fw_done);
197 send_request_map(adapter, ltb->addr,
198 ltb->size, ltb->map_id);
199 wait_for_completion(&adapter->fw_done);
200 return 0;
201}
202
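/* Free a long term buffer and, unless a failover is in progress, tell the
 * VNIC server to drop its mapping.
 */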
203static void free_long_term_buff(struct ibmvnic_adapter *adapter,
204 struct ibmvnic_long_term_buff *ltb)
205{
206 struct device *dev = &adapter->vdev->dev;
207
208 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
209 if (!adapter->failover)
210 send_request_unmap(adapter, ltb->map_id);
211}
212
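/* Allocate the free map, the rx_buff tracking array and the long term DMA
 * buffer that back an rx pool initialized by init_rx_pool().
 */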
213static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
214 struct ibmvnic_rx_pool *pool)
215{
216 struct device *dev = &adapter->vdev->dev;
217 int i;
218
219 pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
220 if (!pool->free_map)
221 return -ENOMEM;
222
223 pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
224 GFP_KERNEL);
225
226 if (!pool->rx_buff) {
227 dev_err(dev, "Couldn't alloc rx buffers\n");
228 kfree(pool->free_map);
229 return -ENOMEM;
230 }
231
232 if (alloc_long_term_buff(adapter, &pool->long_term_buff,
233 pool->size * pool->buff_size)) {
234 kfree(pool->free_map);
235 kfree(pool->rx_buff);
236 return -ENOMEM;
237 }
238
239 for (i = 0; i < pool->size; ++i)
240 pool->free_map[i] = i;
241
242 atomic_set(&pool->available, 0);
243 pool->next_alloc = 0;
244 pool->next_free = 0;
245
246 return 0;
247}
248
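/* Top up an rx pool: for each free slot, allocate an skb, reserve its slot
 * in the pool's long term buffer and post an rx-add descriptor to the
 * server with send_subcrq().
 */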
249static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
250 struct ibmvnic_rx_pool *pool)
251{
252 int count = pool->size - atomic_read(&pool->available);
253 struct device *dev = &adapter->vdev->dev;
254 int buffers_added = 0;
255 unsigned long lpar_rc;
256 union sub_crq sub_crq;
257 struct sk_buff *skb;
258 unsigned int offset;
259 dma_addr_t dma_addr;
260 unsigned char *dst;
261 u64 *handle_array;
262 int shift = 0;
263 int index;
264 int i;
265
266 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
267 be32_to_cpu(adapter->login_rsp_buf->
268 off_rxadd_subcrqs));
269
270 for (i = 0; i < count; ++i) {
271 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
272 if (!skb) {
273 dev_err(dev, "Couldn't replenish rx buff\n");
274 adapter->replenish_no_mem++;
275 break;
276 }
277
278 index = pool->free_map[pool->next_free];
279
280 if (pool->rx_buff[index].skb)
281 dev_err(dev, "Inconsistent free_map!\n");
282
283 /* Copy the skb to the long term mapped DMA buffer */
284 offset = index * pool->buff_size;
285 dst = pool->long_term_buff.buff + offset;
286 memset(dst, 0, pool->buff_size);
287 dma_addr = pool->long_term_buff.addr + offset;
288 pool->rx_buff[index].data = dst;
289
290 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
291 pool->rx_buff[index].dma = dma_addr;
292 pool->rx_buff[index].skb = skb;
293 pool->rx_buff[index].pool_index = pool->index;
294 pool->rx_buff[index].size = pool->buff_size;
295
296 memset(&sub_crq, 0, sizeof(sub_crq));
297 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
298 sub_crq.rx_add.correlator =
299 cpu_to_be64((u64)&pool->rx_buff[index]);
300 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
301 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
302
303 /* The length field of the sCRQ is defined to be 24 bits so the
304 * buffer size needs to be left shifted by a byte before it is
305 * converted to big endian to prevent the last byte from being
306 * truncated.
307 */
308#ifdef __LITTLE_ENDIAN__
309 shift = 8;
310#endif
311 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
312
313 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
314 &sub_crq);
315 if (lpar_rc != H_SUCCESS)
316 goto failure;
317
318 buffers_added++;
319 adapter->replenish_add_buff_success++;
320 pool->next_free = (pool->next_free + 1) % pool->size;
321 }
322 atomic_add(buffers_added, &pool->available);
323 return;
324
325failure:
326 dev_info(dev, "replenish pools failure\n");
327 pool->free_map[pool->next_free] = index;
328 pool->rx_buff[index].skb = NULL;
329 if (!dma_mapping_error(dev, dma_addr))
330 dma_unmap_single(dev, dma_addr, pool->buff_size,
331 DMA_FROM_DEVICE);
332
333 dev_kfree_skb_any(skb);
334 adapter->replenish_add_buff_failure++;
335 atomic_add(buffers_added, &pool->available);
336}
337
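/* Replenish every active rx pool unless the adapter is being migrated. */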
338static void replenish_pools(struct ibmvnic_adapter *adapter)
339{
340 int i;
341
342 if (adapter->migrated)
343 return;
344
345 adapter->replenish_task_cycles++;
346 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
347 i++) {
348 if (adapter->rx_pool[i].active)
349 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
350 }
351}
352
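/* Release the free map, any outstanding skbs and the rx_buff array of a
 * pool. The long term buffer is freed separately by the callers.
 */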
353static void free_rx_pool(struct ibmvnic_adapter *adapter,
354 struct ibmvnic_rx_pool *pool)
355{
356 int i;
357
358 kfree(pool->free_map);
359 pool->free_map = NULL;
360
361 if (!pool->rx_buff)
362 return;
363
364 for (i = 0; i < pool->size; i++) {
365 if (pool->rx_buff[i].skb) {
366 dev_kfree_skb_any(pool->rx_buff[i].skb);
367 pool->rx_buff[i].skb = NULL;
368 }
369 }
370 kfree(pool->rx_buff);
371 pool->rx_buff = NULL;
372}
373
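/* Log in with the VNIC server, re-querying capabilities first whenever the
 * server has requested renegotiation. Both steps are bounded by a 30
 * second timeout on init_done.
 */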
374static int ibmvnic_login(struct net_device *netdev)
375{
376 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
377 unsigned long timeout = msecs_to_jiffies(30000);
378 struct device *dev = &adapter->vdev->dev;
379
380 do {
381 if (adapter->renegotiate) {
382 adapter->renegotiate = false;
383 release_sub_crqs_no_irqs(adapter);
384
385 reinit_completion(&adapter->init_done);
386 send_cap_queries(adapter);
387 if (!wait_for_completion_timeout(&adapter->init_done,
388 timeout)) {
389 dev_err(dev, "Capabilities query timeout\n");
390 return -1;
391 }
392 }
393
394 reinit_completion(&adapter->init_done);
395 send_login(adapter);
396 if (!wait_for_completion_timeout(&adapter->init_done,
397 timeout)) {
398 dev_err(dev, "Login timeout\n");
399 return -1;
400 }
401 } while (adapter->renegotiate);
402
403 return 0;
404}
405
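/* ndo_open: log in, set up sub-CRQ interrupts, NAPI instances, rx/tx pools
 * and the tx bounce buffer, then replenish the rx pools and send a
 * logical-link-up request before starting the tx queues.
 */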
406static int ibmvnic_open(struct net_device *netdev)
407{
408 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
409 struct device *dev = &adapter->vdev->dev;
410 struct ibmvnic_tx_pool *tx_pool;
411 union ibmvnic_crq crq;
412 int rxadd_subcrqs;
413 u64 *size_array;
414 int tx_subcrqs;
415 int rc = 0;
416 int i, j;
417
418 rc = ibmvnic_login(netdev);
419 if (rc)
420 return rc;
421
422 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
423 if (rc) {
424 dev_err(dev, "failed to set the number of tx queues\n");
425 return -1;
426 }
427
428 rc = init_sub_crq_irqs(adapter);
429 if (rc) {
430 dev_err(dev, "failed to initialize sub crq irqs\n");
431 return -1;
432 }
433
434 rxadd_subcrqs =
435 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
436 tx_subcrqs =
437 be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
438 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
439 be32_to_cpu(adapter->login_rsp_buf->
440 off_rxadd_buff_size));
441 adapter->map_id = 1;
442 adapter->napi = kcalloc(adapter->req_rx_queues,
443 sizeof(struct napi_struct), GFP_KERNEL);
444 if (!adapter->napi)
445 goto alloc_napi_failed;
446 for (i = 0; i < adapter->req_rx_queues; i++) {
447 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
448 NAPI_POLL_WEIGHT);
449 napi_enable(&adapter->napi[i]);
450 }
451 adapter->rx_pool =
452 kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
453
454 if (!adapter->rx_pool)
455 goto rx_pool_arr_alloc_failed;
456 send_map_query(adapter);
457 for (i = 0; i < rxadd_subcrqs; i++) {
458 init_rx_pool(adapter, &adapter->rx_pool[i],
459 adapter->req_rx_add_entries_per_subcrq, i,
460 be64_to_cpu(size_array[i]), 1);
461 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
462 dev_err(dev, "Couldn't alloc rx pool\n");
463 goto rx_pool_alloc_failed;
464 }
465 }
466 adapter->tx_pool =
467 kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
468
469 if (!adapter->tx_pool)
470 goto tx_pool_arr_alloc_failed;
471 for (i = 0; i < tx_subcrqs; i++) {
472 tx_pool = &adapter->tx_pool[i];
473 tx_pool->tx_buff =
474 kcalloc(adapter->req_tx_entries_per_subcrq,
475 sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
476 if (!tx_pool->tx_buff)
477 goto tx_pool_alloc_failed;
478
479 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
480 adapter->req_tx_entries_per_subcrq *
481 adapter->req_mtu))
482 goto tx_ltb_alloc_failed;
483
484 tx_pool->free_map =
485 kcalloc(adapter->req_tx_entries_per_subcrq,
486 sizeof(int), GFP_KERNEL);
487 if (!tx_pool->free_map)
488 goto tx_fm_alloc_failed;
489
490 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
491 tx_pool->free_map[j] = j;
492
493 tx_pool->consumer_index = 0;
494 tx_pool->producer_index = 0;
495 }
496 adapter->bounce_buffer_size =
497 (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
498 adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
499 GFP_KERNEL);
500 if (!adapter->bounce_buffer)
501 goto bounce_alloc_failed;
502
503 adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
504 adapter->bounce_buffer_size,
505 DMA_TO_DEVICE);
506 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
507 dev_err(dev, "Couldn't map tx bounce buffer\n");
508 goto bounce_map_failed;
509 }
510 replenish_pools(adapter);
511
512 /* We're ready to receive frames, enable the sub-crq interrupts and
513 * set the logical link state to up
514 */
515 for (i = 0; i < adapter->req_rx_queues; i++)
516 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
517
518 for (i = 0; i < adapter->req_tx_queues; i++)
519 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
520
521 memset(&crq, 0, sizeof(crq));
522 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
523 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
524 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
525 ibmvnic_send_crq(adapter, &crq);
526
527 netif_tx_start_all_queues(netdev);
528
529 return 0;
530
531bounce_map_failed:
532 kfree(adapter->bounce_buffer);
533bounce_alloc_failed:
534 i = tx_subcrqs - 1;
535 kfree(adapter->tx_pool[i].free_map);
536tx_fm_alloc_failed:
537 free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
538tx_ltb_alloc_failed:
539 kfree(adapter->tx_pool[i].tx_buff);
540tx_pool_alloc_failed:
541 for (j = 0; j < i; j++) {
542 kfree(adapter->tx_pool[j].tx_buff);
543 free_long_term_buff(adapter,
544 &adapter->tx_pool[j].long_term_buff);
545 kfree(adapter->tx_pool[j].free_map);
546 }
547 kfree(adapter->tx_pool);
548 adapter->tx_pool = NULL;
549tx_pool_arr_alloc_failed:
550 i = rxadd_subcrqs;
551rx_pool_alloc_failed:
552 for (j = 0; j < i; j++) {
553 free_rx_pool(adapter, &adapter->rx_pool[j]);
554 free_long_term_buff(adapter,
555 &adapter->rx_pool[j].long_term_buff);
556 }
557 kfree(adapter->rx_pool);
558 adapter->rx_pool = NULL;
559rx_pool_arr_alloc_failed:
560 for (i = 0; i < adapter->req_rx_queues; i++)
561 napi_disable(&adapter->napi[i]);
562alloc_napi_failed:
563 release_sub_crqs(adapter);
564 return -ENOMEM;
565}
566
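/* ndo_stop: quiesce NAPI and the tx queues, release the bounce buffer and
 * the rx/tx pools, and send a logical-link-down request to the server.
 */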
567static int ibmvnic_close(struct net_device *netdev)
568{
569 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
570 struct device *dev = &adapter->vdev->dev;
571 union ibmvnic_crq crq;
572 int i;
573
574 adapter->closing = true;
575
576 for (i = 0; i < adapter->req_rx_queues; i++)
577 napi_disable(&adapter->napi[i]);
578
579 if (!adapter->failover)
580 netif_tx_stop_all_queues(netdev);
581
582 if (adapter->bounce_buffer) {
583 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
584 dma_unmap_single(&adapter->vdev->dev,
585 adapter->bounce_buffer_dma,
586 adapter->bounce_buffer_size,
587 DMA_BIDIRECTIONAL);
588 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
589 }
590 kfree(adapter->bounce_buffer);
591 adapter->bounce_buffer = NULL;
592 }
593
594 memset(&crq, 0, sizeof(crq));
595 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
596 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
597 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
598 ibmvnic_send_crq(adapter, &crq);
599
600 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
601 i++) {
602 kfree(adapter->tx_pool[i].tx_buff);
603 free_long_term_buff(adapter,
604 &adapter->tx_pool[i].long_term_buff);
605 kfree(adapter->tx_pool[i].free_map);
606 }
607 kfree(adapter->tx_pool);
608 adapter->tx_pool = NULL;
609
610 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
611 i++) {
612 free_rx_pool(adapter, &adapter->rx_pool[i]);
613 free_long_term_buff(adapter,
614 &adapter->rx_pool[i].long_term_buff);
615 }
616 kfree(adapter->rx_pool);
617 adapter->rx_pool = NULL;
618
619 adapter->closing = false;
620
621 return 0;
622}
623
624/**
625 * build_hdr_data - creates L2/L3/L4 header data buffer
626 * @hdr_field - bitfield determining needed headers
627 * @skb - socket buffer
628 * @hdr_len - array of header lengths
629 * @tot_len - total length of data
630 *
631 * Reads hdr_field to determine which headers are needed by firmware.
632 * Builds a buffer containing these headers. Saves individual header
633 * lengths and total buffer length to be used to build descriptors.
634 */
635static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
636 int *hdr_len, u8 *hdr_data)
637{
638 int len = 0;
639 u8 *hdr;
640
641 hdr_len[0] = sizeof(struct ethhdr);
642
643 if (skb->protocol == htons(ETH_P_IP)) {
644 hdr_len[1] = ip_hdr(skb)->ihl * 4;
645 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
646 hdr_len[2] = tcp_hdrlen(skb);
647 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
648 hdr_len[2] = sizeof(struct udphdr);
649 } else if (skb->protocol == htons(ETH_P_IPV6)) {
650 hdr_len[1] = sizeof(struct ipv6hdr);
651 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
652 hdr_len[2] = tcp_hdrlen(skb);
653 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
654 hdr_len[2] = sizeof(struct udphdr);
655 }
656
657 memset(hdr_data, 0, 120);
658 if ((hdr_field >> 6) & 1) {
659 hdr = skb_mac_header(skb);
660 memcpy(hdr_data, hdr, hdr_len[0]);
661 len += hdr_len[0];
662 }
663
664 if ((hdr_field >> 5) & 1) {
665 hdr = skb_network_header(skb);
666 memcpy(hdr_data + len, hdr, hdr_len[1]);
667 len += hdr_len[1];
668 }
669
670 if ((hdr_field >> 4) & 1) {
671 hdr = skb_transport_header(skb);
672 memcpy(hdr_data + len, hdr, hdr_len[2]);
673 len += hdr_len[2];
674 }
675 return len;
676}
677
678/**
679 * create_hdr_descs - create header and header extension descriptors
680 * @hdr_field - bitfield determining needed headers
681 * @data - buffer containing header data
682 * @len - length of data buffer
683 * @hdr_len - array of individual header lengths
684 * @scrq_arr - descriptor array
685 *
686 * Creates header and, if needed, header extension descriptors and
687 * places them in a descriptor array, scrq_arr
688 */
689
690static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
691 union sub_crq *scrq_arr)
692{
693 union sub_crq hdr_desc;
694 int tmp_len = len;
695 u8 *data, *cur;
696 int tmp;
697
698 while (tmp_len > 0) {
699 cur = hdr_data + len - tmp_len;
700
701 memset(&hdr_desc, 0, sizeof(hdr_desc));
702 if (cur != hdr_data) {
703 data = hdr_desc.hdr_ext.data;
704 tmp = tmp_len > 29 ? 29 : tmp_len;
705 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
706 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
707 hdr_desc.hdr_ext.len = tmp;
708 } else {
709 data = hdr_desc.hdr.data;
710 tmp = tmp_len > 24 ? 24 : tmp_len;
711 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
712 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
713 hdr_desc.hdr.len = tmp;
714 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
715 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
716 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
717 hdr_desc.hdr.flag = hdr_field << 1;
718 }
719 memcpy(data, cur, tmp);
720 tmp_len -= tmp;
721 *scrq_arr = hdr_desc;
722 scrq_arr++;
723 }
724}
725
726/**
727 * build_hdr_descs_arr - build a header descriptor array
728 * @skb - socket buffer
729 * @num_entries - number of descriptors to be sent
730 * @subcrq - first TX descriptor
731 * @hdr_field - bit field determining which headers will be sent
732 *
733 * This function will build a TX descriptor array with applicable
734 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
735 */
736
737static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
738 int *num_entries, u8 hdr_field)
739{
740 int hdr_len[3] = {0, 0, 0};
741 int tot_len, len;
742 u8 *hdr_data = txbuff->hdr_data;
743
744 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
745 txbuff->hdr_data);
746 len = tot_len;
747 len -= 24;
748 if (len > 0)
749 num_entries += len % 29 ? len / 29 + 1 : len / 29;
750 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
751 txbuff->indir_arr + 1);
752}
753
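/* ndo_start_xmit: copy the skb into the tx pool's long term buffer, build a
 * TX descriptor (plus header descriptors when the firmware wants them) and
 * hand it to the server with send_subcrq()/send_subcrq_indirect().
 */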
754static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
755{
756 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
757 int queue_num = skb_get_queue_mapping(skb);
758 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
759 struct device *dev = &adapter->vdev->dev;
760 struct ibmvnic_tx_buff *tx_buff = NULL;
761 struct ibmvnic_sub_crq_queue *tx_scrq;
762 struct ibmvnic_tx_pool *tx_pool;
763 unsigned int tx_send_failed = 0;
764 unsigned int tx_map_failed = 0;
765 unsigned int tx_dropped = 0;
766 unsigned int tx_packets = 0;
767 unsigned int tx_bytes = 0;
768 dma_addr_t data_dma_addr;
769 struct netdev_queue *txq;
770 bool used_bounce = false;
771 unsigned long lpar_rc;
772 union sub_crq tx_crq;
773 unsigned int offset;
774 int num_entries = 1;
775 unsigned char *dst;
776 u64 *handle_array;
777 int index = 0;
778 int ret = 0;
779
780 tx_pool = &adapter->tx_pool[queue_num];
781 tx_scrq = adapter->tx_scrq[queue_num];
782 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
783 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
784 be32_to_cpu(adapter->login_rsp_buf->
785 off_txsubm_subcrqs));
786 if (adapter->migrated) {
787 tx_send_failed++;
788 tx_dropped++;
789 ret = NETDEV_TX_BUSY;
790 goto out;
791 }
792
793 index = tx_pool->free_map[tx_pool->consumer_index];
794 offset = index * adapter->req_mtu;
795 dst = tx_pool->long_term_buff.buff + offset;
796 memset(dst, 0, adapter->req_mtu);
797 skb_copy_from_linear_data(skb, dst, skb->len);
798 data_dma_addr = tx_pool->long_term_buff.addr + offset;
799
800 tx_pool->consumer_index =
801 (tx_pool->consumer_index + 1) %
802 adapter->req_tx_entries_per_subcrq;
803
804 tx_buff = &tx_pool->tx_buff[index];
805 tx_buff->skb = skb;
806 tx_buff->data_dma[0] = data_dma_addr;
807 tx_buff->data_len[0] = skb->len;
808 tx_buff->index = index;
809 tx_buff->pool_index = queue_num;
810 tx_buff->last_frag = true;
811 tx_buff->used_bounce = used_bounce;
812
813 memset(&tx_crq, 0, sizeof(tx_crq));
814 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
815 tx_crq.v1.type = IBMVNIC_TX_DESC;
816 tx_crq.v1.n_crq_elem = 1;
817 tx_crq.v1.n_sge = 1;
818 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
819 tx_crq.v1.correlator = cpu_to_be32(index);
820 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
821 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
822 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
823
824 if (adapter->vlan_header_insertion) {
825 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
826 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
827 }
828
829 if (skb->protocol == htons(ETH_P_IP)) {
830 if (ip_hdr(skb)->version == 4)
831 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
832 else if (ip_hdr(skb)->version == 6)
833 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
834
835 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
836 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
837 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
838 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
839 }
840
841 if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
843 hdrs += 2;
844 }
845 /* determine if l2/3/4 headers are sent to firmware */
846 if ((*hdrs >> 7) & 1 &&
847 (skb->protocol == htons(ETH_P_IP) ||
848 skb->protocol == htons(ETH_P_IPV6))) {
849 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
850 tx_crq.v1.n_crq_elem = num_entries;
851 tx_buff->indir_arr[0] = tx_crq;
852 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
853 sizeof(tx_buff->indir_arr),
854 DMA_TO_DEVICE);
855 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
856 if (!firmware_has_feature(FW_FEATURE_CMO))
857 dev_err(dev, "tx: unable to map descriptor array\n");
858 tx_map_failed++;
859 tx_dropped++;
860 ret = NETDEV_TX_BUSY;
861 goto out;
862 }
863 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
864 (u64)tx_buff->indir_dma,
865 (u64)num_entries);
866 } else {
867 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
868 &tx_crq);
869 }
870 if (lpar_rc != H_SUCCESS) {
871 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
872
873 if (tx_pool->consumer_index == 0)
874 tx_pool->consumer_index =
875 adapter->req_tx_entries_per_subcrq - 1;
876 else
877 tx_pool->consumer_index--;
878
879 tx_send_failed++;
880 tx_dropped++;
881 ret = NETDEV_TX_BUSY;
882 goto out;
883 }
884
885 atomic_inc(&tx_scrq->used);
886
887 if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
888 netdev_info(netdev, "Stopping queue %d\n", queue_num);
889 netif_stop_subqueue(netdev, queue_num);
890 }
891
892 tx_packets++;
893 tx_bytes += skb->len;
894 txq->trans_start = jiffies;
895 ret = NETDEV_TX_OK;
896
897out:
898 netdev->stats.tx_dropped += tx_dropped;
899 netdev->stats.tx_bytes += tx_bytes;
900 netdev->stats.tx_packets += tx_packets;
901 adapter->tx_send_failed += tx_send_failed;
902 adapter->tx_map_failed += tx_map_failed;
903
904 return ret;
905}
906
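/* ndo_set_rx_mode: translate the netdev multicast flags and address list
 * into MULTICAST_CTRL CRQ commands for the server.
 */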
907static void ibmvnic_set_multi(struct net_device *netdev)
908{
909 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
910 struct netdev_hw_addr *ha;
911 union ibmvnic_crq crq;
912
913 memset(&crq, 0, sizeof(crq));
914 crq.request_capability.first = IBMVNIC_CRQ_CMD;
915 crq.request_capability.cmd = REQUEST_CAPABILITY;
916
917 if (netdev->flags & IFF_PROMISC) {
918 if (!adapter->promisc_supported)
919 return;
920 } else {
921 if (netdev->flags & IFF_ALLMULTI) {
922 /* Accept all multicast */
923 memset(&crq, 0, sizeof(crq));
924 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
925 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
926 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
927 ibmvnic_send_crq(adapter, &crq);
928 } else if (netdev_mc_empty(netdev)) {
929 /* Reject all multicast */
930 memset(&crq, 0, sizeof(crq));
931 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
932 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
933 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
934 ibmvnic_send_crq(adapter, &crq);
935 } else {
936 /* Accept one or more multicast(s) */
937 netdev_for_each_mc_addr(ha, netdev) {
938 memset(&crq, 0, sizeof(crq));
939 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
940 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
941 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
942 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
943 ha->addr);
944 ibmvnic_send_crq(adapter, &crq);
945 }
946 }
947 }
948}
949
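/* ndo_set_mac_address: validate the address and send a CHANGE_MAC_ADDR CRQ;
 * netdev->dev_addr is updated when the response arrives.
 */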
950static int ibmvnic_set_mac(struct net_device *netdev, void *p)
951{
952 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
953 struct sockaddr *addr = p;
954 union ibmvnic_crq crq;
955
956 if (!is_valid_ether_addr(addr->sa_data))
957 return -EADDRNOTAVAIL;
958
959 memset(&crq, 0, sizeof(crq));
960 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
961 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
962 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
963 ibmvnic_send_crq(adapter, &crq);
964 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
965 return 0;
966}
967
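/* ndo_tx_timeout: the adapter stopped responding, so tear down the
 * sub-CRQs, reset the main CRQ and restart initialization.
 */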
968static void ibmvnic_tx_timeout(struct net_device *dev)
969{
970 struct ibmvnic_adapter *adapter = netdev_priv(dev);
971 int rc;
972
973 /* Adapter timed out, resetting it */
974 release_sub_crqs(adapter);
975 rc = ibmvnic_reset_crq(adapter);
976 if (rc)
977 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
978 else
979 ibmvnic_send_crq_init(adapter);
980}
981
982static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
983 struct ibmvnic_rx_buff *rx_buff)
984{
985 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
986
987 rx_buff->skb = NULL;
988
989 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
990 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
991
992 atomic_dec(&pool->available);
993}
994
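/* NAPI poll: drain completed rx descriptors from this queue's sub-CRQ, copy
 * the data out of the long term buffer into each skb and pass it up with
 * napi_gro_receive(), then replenish the pool and re-enable the interrupt
 * once the budget is no longer exhausted.
 */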
995static int ibmvnic_poll(struct napi_struct *napi, int budget)
996{
997 struct net_device *netdev = napi->dev;
998 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
999 int scrq_num = (int)(napi - adapter->napi);
1000 int frames_processed = 0;
1001restart_poll:
1002 while (frames_processed < budget) {
1003 struct sk_buff *skb;
1004 struct ibmvnic_rx_buff *rx_buff;
1005 union sub_crq *next;
1006 u32 length;
1007 u16 offset;
1008 u8 flags = 0;
1009
1010 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1011 break;
1012 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1013 rx_buff =
1014 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1015 rx_comp.correlator);
1016 /* do error checking */
1017 if (next->rx_comp.rc) {
1018 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
1019 /* free the entry */
1020 next->rx_comp.first = 0;
1021 remove_buff_from_pool(adapter, rx_buff);
1022 break;
1023 }
1024
1025 length = be32_to_cpu(next->rx_comp.len);
1026 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1027 flags = next->rx_comp.flags;
1028 skb = rx_buff->skb;
1029 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1030 length);
1031 skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
1032 /* free the entry */
1033 next->rx_comp.first = 0;
1034 remove_buff_from_pool(adapter, rx_buff);
1035
1036 skb_put(skb, length);
1037 skb->protocol = eth_type_trans(skb, netdev);
1038
1039 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1040 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1041 skb->ip_summed = CHECKSUM_UNNECESSARY;
1042 }
1043
1044 length = skb->len;
1045 napi_gro_receive(napi, skb); /* send it up */
1046 netdev->stats.rx_packets++;
1047 netdev->stats.rx_bytes += length;
1048 frames_processed++;
1049 }
1050 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
1051
1052 if (frames_processed < budget) {
1053 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1054 napi_complete_done(napi, frames_processed);
1055 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1056 napi_reschedule(napi)) {
1057 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1058 goto restart_poll;
1059 }
1060 }
1061 return frames_processed;
1062}
1063
1064#ifdef CONFIG_NET_POLL_CONTROLLER
1065static void ibmvnic_netpoll_controller(struct net_device *dev)
1066{
1067 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1068 int i;
1069
1070 replenish_pools(netdev_priv(dev));
1071 for (i = 0; i < adapter->req_rx_queues; i++)
1072 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1073 adapter->rx_scrq[i]);
1074}
1075#endif
1076
1077static const struct net_device_ops ibmvnic_netdev_ops = {
1078 .ndo_open = ibmvnic_open,
1079 .ndo_stop = ibmvnic_close,
1080 .ndo_start_xmit = ibmvnic_xmit,
1081 .ndo_set_rx_mode = ibmvnic_set_multi,
1082 .ndo_set_mac_address = ibmvnic_set_mac,
1083 .ndo_validate_addr = eth_validate_addr,
1084 .ndo_tx_timeout = ibmvnic_tx_timeout,
1085#ifdef CONFIG_NET_POLL_CONTROLLER
1086 .ndo_poll_controller = ibmvnic_netpoll_controller,
1087#endif
1088};
1089
1090/* ethtool functions */
1091
1092static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1093 struct ethtool_link_ksettings *cmd)
1094{
1095 u32 supported, advertising;
1096
1097 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1098 SUPPORTED_FIBRE);
1099 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1100 ADVERTISED_FIBRE);
1101 cmd->base.speed = SPEED_1000;
1102 cmd->base.duplex = DUPLEX_FULL;
1103 cmd->base.port = PORT_FIBRE;
1104 cmd->base.phy_address = 0;
1105 cmd->base.autoneg = AUTONEG_ENABLE;
1106
1107 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1108 supported);
1109 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1110 advertising);
1111
1112 return 0;
1113}
1114
1115static void ibmvnic_get_drvinfo(struct net_device *dev,
1116 struct ethtool_drvinfo *info)
1117{
1118 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1119 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1120}
1121
1122static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1123{
1124 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1125
1126 return adapter->msg_enable;
1127}
1128
1129static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1130{
1131 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1132
1133 adapter->msg_enable = data;
1134}
1135
1136static u32 ibmvnic_get_link(struct net_device *netdev)
1137{
1138 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1139
1140 /* Don't need to send a query because we request a logical link up at
1141 * init and then we wait for link state indications
1142 */
1143 return adapter->logical_link_state;
1144}
1145
1146static void ibmvnic_get_ringparam(struct net_device *netdev,
1147 struct ethtool_ringparam *ring)
1148{
1149 ring->rx_max_pending = 0;
1150 ring->tx_max_pending = 0;
1151 ring->rx_mini_max_pending = 0;
1152 ring->rx_jumbo_max_pending = 0;
1153 ring->rx_pending = 0;
1154 ring->tx_pending = 0;
1155 ring->rx_mini_pending = 0;
1156 ring->rx_jumbo_pending = 0;
1157}
1158
1159static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1160{
1161 int i;
1162
1163 if (stringset != ETH_SS_STATS)
1164 return;
1165
1166 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1167 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1168}
1169
1170static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1171{
1172 switch (sset) {
1173 case ETH_SS_STATS:
1174 return ARRAY_SIZE(ibmvnic_stats);
1175 default:
1176 return -EOPNOTSUPP;
1177 }
1178}
1179
1180static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1181 struct ethtool_stats *stats, u64 *data)
1182{
1183 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1184 union ibmvnic_crq crq;
1185 int i;
1186
1187 memset(&crq, 0, sizeof(crq));
1188 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1189 crq.request_statistics.cmd = REQUEST_STATISTICS;
1190 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1191 crq.request_statistics.len =
1192 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1193
1194 /* Wait for data to be written */
1195 init_completion(&adapter->stats_done);
1196 ibmvnic_send_crq(adapter, &crq);
1197 wait_for_completion(&adapter->stats_done);
1198
1199 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1200 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1201}
1202
1203static const struct ethtool_ops ibmvnic_ethtool_ops = {
1204 .get_drvinfo = ibmvnic_get_drvinfo,
1205 .get_msglevel = ibmvnic_get_msglevel,
1206 .set_msglevel = ibmvnic_set_msglevel,
1207 .get_link = ibmvnic_get_link,
1208 .get_ringparam = ibmvnic_get_ringparam,
1209 .get_strings = ibmvnic_get_strings,
1210 .get_sset_count = ibmvnic_get_sset_count,
1211 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
1212 .get_link_ksettings = ibmvnic_get_link_ksettings,
1213};
1214
1215/* Routines for managing CRQs/sCRQs */
1216
1217static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1218 struct ibmvnic_sub_crq_queue *scrq)
1219{
1220 struct device *dev = &adapter->vdev->dev;
1221 long rc;
1222
1223 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1224
1225 /* Close the sub-crqs */
1226 do {
1227 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1228 adapter->vdev->unit_address,
1229 scrq->crq_num);
1230 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1231
1232 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1233 DMA_BIDIRECTIONAL);
1234 free_pages((unsigned long)scrq->msgs, 2);
1235 kfree(scrq);
1236}
1237
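/* Allocate and register one sub-CRQ: four pages of message slots are DMA
 * mapped and handed to the hypervisor through h_reg_sub_crq().
 */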
1238static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1239 *adapter)
1240{
1241 struct device *dev = &adapter->vdev->dev;
1242 struct ibmvnic_sub_crq_queue *scrq;
1243 int rc;
1244
1245 scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1246 if (!scrq)
1247 return NULL;
1248
1249 scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
1250 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1251 if (!scrq->msgs) {
1252 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1253 goto zero_page_failed;
1254 }
1255
1256 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1257 DMA_BIDIRECTIONAL);
1258 if (dma_mapping_error(dev, scrq->msg_token)) {
1259 dev_warn(dev, "Couldn't map crq queue messages page\n");
1260 goto map_failed;
1261 }
1262
1263 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1264 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1265
1266 if (rc == H_RESOURCE)
1267 rc = ibmvnic_reset_crq(adapter);
1268
1269 if (rc == H_CLOSED) {
1270 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1271 } else if (rc) {
1272 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1273 goto reg_failed;
1274 }
1275
1276 scrq->adapter = adapter;
1277 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1278 scrq->cur = 0;
1279 atomic_set(&scrq->used, 0);
1280 scrq->rx_skb_top = NULL;
1281 spin_lock_init(&scrq->lock);
1282
1283 netdev_dbg(adapter->netdev,
1284 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1285 scrq->crq_num, scrq->hw_irq, scrq->irq);
1286
1287 return scrq;
1288
1289reg_failed:
1290 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1291 DMA_BIDIRECTIONAL);
1292map_failed:
1293 free_pages((unsigned long)scrq->msgs, 2);
1294zero_page_failed:
1295 kfree(scrq);
1296
1297 return NULL;
1298}
1299
1300static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1301{
1302 int i;
1303
1304 if (adapter->tx_scrq) {
1305 for (i = 0; i < adapter->req_tx_queues; i++)
1306 if (adapter->tx_scrq[i]) {
1307 free_irq(adapter->tx_scrq[i]->irq,
1308 adapter->tx_scrq[i]);
1309 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
1310 release_sub_crq_queue(adapter,
1311 adapter->tx_scrq[i]);
1312 }
1313 adapter->tx_scrq = NULL;
1314 }
1315
1316 if (adapter->rx_scrq) {
1317 for (i = 0; i < adapter->req_rx_queues; i++)
1318 if (adapter->rx_scrq[i]) {
1319 free_irq(adapter->rx_scrq[i]->irq,
1320 adapter->rx_scrq[i]);
1321 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
1322 release_sub_crq_queue(adapter,
1323 adapter->rx_scrq[i]);
1324 }
1325 adapter->rx_scrq = NULL;
1326 }
1327}
1328
1329static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
1330{
1331 int i;
1332
1333 if (adapter->tx_scrq) {
1334 for (i = 0; i < adapter->req_tx_queues; i++)
1335 if (adapter->tx_scrq[i])
1336 release_sub_crq_queue(adapter,
1337 adapter->tx_scrq[i]);
1338 adapter->tx_scrq = NULL;
1339 }
1340
1341 if (adapter->rx_scrq) {
1342 for (i = 0; i < adapter->req_rx_queues; i++)
1343 if (adapter->rx_scrq[i])
1344 release_sub_crq_queue(adapter,
1345 adapter->rx_scrq[i]);
1346 adapter->rx_scrq = NULL;
1347 }
1348}
1349
1350static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1351 struct ibmvnic_sub_crq_queue *scrq)
1352{
1353 struct device *dev = &adapter->vdev->dev;
1354 unsigned long rc;
1355
1356 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1357 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1358 if (rc)
1359 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1360 scrq->hw_irq, rc);
1361 return rc;
1362}
1363
1364static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1365 struct ibmvnic_sub_crq_queue *scrq)
1366{
1367 struct device *dev = &adapter->vdev->dev;
1368 unsigned long rc;
1369
1370 if (scrq->hw_irq > 0x100000000ULL) {
1371 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1372 return 1;
1373 }
1374
1375 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1376 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1377 if (rc)
1378 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1379 scrq->hw_irq, rc);
1380 return rc;
1381}
1382
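/* Process tx completions for one sub-CRQ: unmap indirect descriptors, free
 * completed skbs, return slots to the tx pool's free map and wake the
 * subqueue when enough entries have drained.
 */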
1383static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1384 struct ibmvnic_sub_crq_queue *scrq)
1385{
1386 struct device *dev = &adapter->vdev->dev;
1387 struct ibmvnic_tx_buff *txbuff;
1388 union sub_crq *next;
1389 int index;
1390 int i, j;
1391 u8 first;
1392
1393restart_loop:
1394 while (pending_scrq(adapter, scrq)) {
1395 unsigned int pool = scrq->pool_index;
1396
1397 next = ibmvnic_next_scrq(adapter, scrq);
1398 for (i = 0; i < next->tx_comp.num_comps; i++) {
1399 if (next->tx_comp.rcs[i]) {
1400 dev_err(dev, "tx error %x\n",
1401 next->tx_comp.rcs[i]);
1402 continue;
1403 }
1404 index = be32_to_cpu(next->tx_comp.correlators[i]);
1405 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1406
1407 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1408 if (!txbuff->data_dma[j])
1409 continue;
1410
1411 txbuff->data_dma[j] = 0;
1412 txbuff->used_bounce = false;
1413 }
1414 /* if sub_crq was sent indirectly */
1415 first = txbuff->indir_arr[0].generic.first;
1416 if (first == IBMVNIC_CRQ_CMD) {
1417 dma_unmap_single(dev, txbuff->indir_dma,
1418 sizeof(txbuff->indir_arr),
1419 DMA_TO_DEVICE);
1420 }
1421
1422 if (txbuff->last_frag) {
1423 atomic_dec(&scrq->used);
1424
1425 if (atomic_read(&scrq->used) <=
1426 (adapter->req_tx_entries_per_subcrq / 2) &&
1427 netif_subqueue_stopped(adapter->netdev,
1428 txbuff->skb)) {
1429 netif_wake_subqueue(adapter->netdev,
1430 scrq->pool_index);
1431 netdev_dbg(adapter->netdev,
1432 "Started queue %d\n",
1433 scrq->pool_index);
1434 }
1435
1436 dev_kfree_skb_any(txbuff->skb);
1437 }
1438
1439 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1440 producer_index] = index;
1441 adapter->tx_pool[pool].producer_index =
1442 (adapter->tx_pool[pool].producer_index + 1) %
1443 adapter->req_tx_entries_per_subcrq;
1444 }
1445 /* remove tx_comp scrq*/
1446 next->tx_comp.first = 0;
1447 }
1448
1449 enable_scrq_irq(adapter, scrq);
1450
1451 if (pending_scrq(adapter, scrq)) {
1452 disable_scrq_irq(adapter, scrq);
1453 goto restart_loop;
1454 }
1455
1456 return 0;
1457}
1458
1459static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1460{
1461 struct ibmvnic_sub_crq_queue *scrq = instance;
1462 struct ibmvnic_adapter *adapter = scrq->adapter;
1463
1464 disable_scrq_irq(adapter, scrq);
1465 ibmvnic_complete_tx(adapter, scrq);
1466
1467 return IRQ_HANDLED;
1468}
1469
1470static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1471{
1472 struct ibmvnic_sub_crq_queue *scrq = instance;
1473 struct ibmvnic_adapter *adapter = scrq->adapter;
1474
1475 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1476 disable_scrq_irq(adapter, scrq);
1477 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1478 }
1479
1480 return IRQ_HANDLED;
1481}
1482
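/* Map and request an interrupt for every tx and rx sub-CRQ, unwinding the
 * mappings already made if any request fails.
 */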
1483static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1484{
1485 struct device *dev = &adapter->vdev->dev;
1486 struct ibmvnic_sub_crq_queue *scrq;
1487 int i = 0, j = 0;
1488 int rc = 0;
1489
1490 for (i = 0; i < adapter->req_tx_queues; i++) {
1491 scrq = adapter->tx_scrq[i];
1492 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1493
1494 if (!scrq->irq) {
1495 rc = -EINVAL;
1496 dev_err(dev, "Error mapping irq\n");
1497 goto req_tx_irq_failed;
1498 }
1499
1500 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1501 0, "ibmvnic_tx", scrq);
1502
1503 if (rc) {
1504 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1505 scrq->irq, rc);
1506 irq_dispose_mapping(scrq->irq);
1507 goto req_rx_irq_failed;
1508 }
1509 }
1510
1511 for (i = 0; i < adapter->req_rx_queues; i++) {
1512 scrq = adapter->rx_scrq[i];
1513 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1514 if (!scrq->irq) {
1515 rc = -EINVAL;
1516 dev_err(dev, "Error mapping irq\n");
1517 goto req_rx_irq_failed;
1518 }
1519 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1520 0, "ibmvnic_rx", scrq);
1521 if (rc) {
1522 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1523 scrq->irq, rc);
1524 irq_dispose_mapping(scrq->irq);
1525 goto req_rx_irq_failed;
1526 }
1527 }
1528 return rc;
1529
1530req_rx_irq_failed:
1531 for (j = 0; j < i; j++) {
1532 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1533 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1534 }
1535 i = adapter->req_tx_queues;
1536req_tx_irq_failed:
1537 for (j = 0; j < i; j++) {
1538 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1539 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1540 }
1541 release_sub_crqs_no_irqs(adapter);
1542 return rc;
1543}
1544
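/* Negotiate queue resources with the server: size the requests from the
 * queried capabilities, allocate the sub-CRQs (reducing the request when
 * some allocations fail) and send the REQUEST_CAPABILITY CRQs.
 */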
1545static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1546{
1547 struct device *dev = &adapter->vdev->dev;
1548 struct ibmvnic_sub_crq_queue **allqueues;
1549 int registered_queues = 0;
1550 union ibmvnic_crq crq;
1551 int total_queues;
1552 int more = 0;
1553 int i;
1554
1555 if (!retry) {
1556 /* Sub-CRQ entries are 32 byte long */
1557 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1558
1559 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1560 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1561 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1562 goto allqueues_failed;
1563 }
1564
1565 /* Get the minimum between the queried max and the entries
1566 * that fit in our PAGE_SIZE
1567 */
1568 adapter->req_tx_entries_per_subcrq =
1569 adapter->max_tx_entries_per_subcrq > entries_page ?
1570 entries_page : adapter->max_tx_entries_per_subcrq;
1571 adapter->req_rx_add_entries_per_subcrq =
1572 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1573 entries_page : adapter->max_rx_add_entries_per_subcrq;
1574
1575 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1576 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1577 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1578
1579 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1580 }
1581
1582 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1583
1584 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1585 if (!allqueues)
1586 goto allqueues_failed;
1587
1588 for (i = 0; i < total_queues; i++) {
1589 allqueues[i] = init_sub_crq_queue(adapter);
1590 if (!allqueues[i]) {
1591 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1592 break;
1593 }
1594 registered_queues++;
1595 }
1596
1597 /* Make sure we were able to register the minimum number of queues */
1598 if (registered_queues <
1599 adapter->min_tx_queues + adapter->min_rx_queues) {
1600 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1601 goto tx_failed;
1602 }
1603
1604 /* Distribute the failed allocated queues*/
1605 for (i = 0; i < total_queues - registered_queues + more ; i++) {
1606 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1607 switch (i % 3) {
1608 case 0:
1609 if (adapter->req_rx_queues > adapter->min_rx_queues)
1610 adapter->req_rx_queues--;
1611 else
1612 more++;
1613 break;
1614 case 1:
1615 if (adapter->req_tx_queues > adapter->min_tx_queues)
1616 adapter->req_tx_queues--;
1617 else
1618 more++;
1619 break;
1620 }
1621 }
1622
1623 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1624 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1625 if (!adapter->tx_scrq)
1626 goto tx_failed;
1627
1628 for (i = 0; i < adapter->req_tx_queues; i++) {
1629 adapter->tx_scrq[i] = allqueues[i];
1630 adapter->tx_scrq[i]->pool_index = i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001631 }
1632
1633 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1634 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1635 if (!adapter->rx_scrq)
1636 goto rx_failed;
1637
1638 for (i = 0; i < adapter->req_rx_queues; i++) {
1639 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1640 adapter->rx_scrq[i]->scrq_num = i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001641 }
1642
1643 memset(&crq, 0, sizeof(crq));
1644 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1645 crq.request_capability.cmd = REQUEST_CAPABILITY;
1646
1647 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001648 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001649 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001650 ibmvnic_send_crq(adapter, &crq);
1651
1652 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001653 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001654 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001655 ibmvnic_send_crq(adapter, &crq);
1656
1657 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06001658 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06001659 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001660 ibmvnic_send_crq(adapter, &crq);
1661
1662 crq.request_capability.capability =
1663 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1664 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06001665 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06001666 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001667 ibmvnic_send_crq(adapter, &crq);
1668
1669 crq.request_capability.capability =
1670 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1671 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06001672 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06001673 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001674 ibmvnic_send_crq(adapter, &crq);
1675
1676 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06001677 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06001678 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001679 ibmvnic_send_crq(adapter, &crq);
1680
1681 if (adapter->netdev->flags & IFF_PROMISC) {
1682 if (adapter->promisc_supported) {
1683 crq.request_capability.capability =
1684 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06001685 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06001686 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001687 ibmvnic_send_crq(adapter, &crq);
1688 }
1689 } else {
1690 crq.request_capability.capability =
1691 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06001692 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06001693 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001694 ibmvnic_send_crq(adapter, &crq);
1695 }
1696
1697 kfree(allqueues);
1698
1699 return;
1700
Thomas Falcon032c5e82015-12-21 11:26:06 -06001701rx_failed:
1702 kfree(adapter->tx_scrq);
1703 adapter->tx_scrq = NULL;
1704tx_failed:
1705 for (i = 0; i < registered_queues; i++)
1706 release_sub_crq_queue(adapter, allqueues[i]);
1707 kfree(allqueues);
1708allqueues_failed:
1709 ibmvnic_remove(adapter->vdev);
1710}
1711
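/* Return 1 if the current entry of the sub-CRQ holds a valid response or
 * the adapter is closing, 0 otherwise.
 */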
1712static int pending_scrq(struct ibmvnic_adapter *adapter,
1713 struct ibmvnic_sub_crq_queue *scrq)
1714{
1715 union sub_crq *entry = &scrq->msgs[scrq->cur];
1716
1717 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1718 return 1;
1719 else
1720 return 0;
1721}
1722
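/* Consume and return the next valid entry of a sub-CRQ, advancing the
 * cursor under the queue lock, or return NULL if the queue is empty.
 */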
1723static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1724 struct ibmvnic_sub_crq_queue *scrq)
1725{
1726 union sub_crq *entry;
1727 unsigned long flags;
1728
1729 spin_lock_irqsave(&scrq->lock, flags);
1730 entry = &scrq->msgs[scrq->cur];
1731 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1732 if (++scrq->cur == scrq->size)
1733 scrq->cur = 0;
1734 } else {
1735 entry = NULL;
1736 }
1737 spin_unlock_irqrestore(&scrq->lock, flags);
1738
1739 return entry;
1740}
1741
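/* Return the next valid entry of the main CRQ and advance the cursor, or
 * NULL if no response is pending.
 */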
1742static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1743{
1744 struct ibmvnic_crq_queue *queue = &adapter->crq;
1745 union ibmvnic_crq *crq;
1746
1747 crq = &queue->msgs[queue->cur];
1748 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1749 if (++queue->cur == queue->size)
1750 queue->cur = 0;
1751 } else {
1752 crq = NULL;
1753 }
1754
1755 return crq;
1756}
1757
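/* Hand a single sub-CRQ descriptor to the hypervisor for the given remote
 * queue using the H_SEND_SUB_CRQ hcall.
 */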
1758static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1759 union sub_crq *sub_crq)
1760{
1761 unsigned int ua = adapter->vdev->unit_address;
1762 struct device *dev = &adapter->vdev->dev;
1763 u64 *u64_crq = (u64 *)sub_crq;
1764 int rc;
1765
1766 netdev_dbg(adapter->netdev,
1767 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1768 (unsigned long int)cpu_to_be64(remote_handle),
1769 (unsigned long int)cpu_to_be64(u64_crq[0]),
1770 (unsigned long int)cpu_to_be64(u64_crq[1]),
1771 (unsigned long int)cpu_to_be64(u64_crq[2]),
1772 (unsigned long int)cpu_to_be64(u64_crq[3]));
1773
1774 /* Make sure the hypervisor sees the complete request */
1775 mb();
1776
1777 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1778 cpu_to_be64(remote_handle),
1779 cpu_to_be64(u64_crq[0]),
1780 cpu_to_be64(u64_crq[1]),
1781 cpu_to_be64(u64_crq[2]),
1782 cpu_to_be64(u64_crq[3]));
1783
1784 if (rc) {
1785 if (rc == H_CLOSED)
1786 dev_warn(dev, "CRQ Queue closed\n");
1787 dev_err(dev, "Send error (rc=%d)\n", rc);
1788 }
1789
1790 return rc;
1791}
1792
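/* Hand a block of sub-CRQ descriptors, described by its I/O address and
 * entry count, to the hypervisor using H_SEND_SUB_CRQ_INDIRECT.
 */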
Thomas Falconad7775d2016-04-01 17:20:34 -05001793static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1794 u64 remote_handle, u64 ioba, u64 num_entries)
1795{
1796 unsigned int ua = adapter->vdev->unit_address;
1797 struct device *dev = &adapter->vdev->dev;
1798 int rc;
1799
1800 /* Make sure the hypervisor sees the complete request */
1801 mb();
1802 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1803 cpu_to_be64(remote_handle),
1804 ioba, num_entries);
1805
1806 if (rc) {
1807 if (rc == H_CLOSED)
1808 dev_warn(dev, "CRQ Queue closed\n");
1809 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1810 }
1811
1812 return rc;
1813}
1814
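/* Send a single CRQ message (two 8-byte halves) to the VNIC server with
 * the H_SEND_CRQ hcall.
 */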
Thomas Falcon032c5e82015-12-21 11:26:06 -06001815static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1816 union ibmvnic_crq *crq)
1817{
1818 unsigned int ua = adapter->vdev->unit_address;
1819 struct device *dev = &adapter->vdev->dev;
1820 u64 *u64_crq = (u64 *)crq;
1821 int rc;
1822
1823 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1824 (unsigned long int)cpu_to_be64(u64_crq[0]),
1825 (unsigned long int)cpu_to_be64(u64_crq[1]));
1826
1827 /* Make sure the hypervisor sees the complete request */
1828 mb();
1829
1830 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1831 cpu_to_be64(u64_crq[0]),
1832 cpu_to_be64(u64_crq[1]));
1833
1834 if (rc) {
1835 if (rc == H_CLOSED)
1836 dev_warn(dev, "CRQ Queue closed\n");
1837 dev_warn(dev, "Send error (rc=%d)\n", rc);
1838 }
1839
1840 return rc;
1841}
1842
1843static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1844{
1845 union ibmvnic_crq crq;
1846
1847 memset(&crq, 0, sizeof(crq));
1848 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1849 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1850 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1851
1852 return ibmvnic_send_crq(adapter, &crq);
1853}
1854
1855static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1856{
1857 union ibmvnic_crq crq;
1858
1859 memset(&crq, 0, sizeof(crq));
1860 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1861 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1862 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1863
1864 return ibmvnic_send_crq(adapter, &crq);
1865}
1866
1867static int send_version_xchg(struct ibmvnic_adapter *adapter)
1868{
1869 union ibmvnic_crq crq;
1870
1871 memset(&crq, 0, sizeof(crq));
1872 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1873 crq.version_exchange.cmd = VERSION_EXCHANGE;
1874 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1875
1876 return ibmvnic_send_crq(adapter, &crq);
1877}
1878
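/* Build and DMA map the login buffer describing the tx/rx sub-CRQs, queue
 * the command on the in-flight list, and send the LOGIN CRQ to the server.
 */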
1879static void send_login(struct ibmvnic_adapter *adapter)
1880{
1881 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1882 struct ibmvnic_login_buffer *login_buffer;
1883 struct ibmvnic_inflight_cmd *inflight_cmd;
1884 struct device *dev = &adapter->vdev->dev;
1885 dma_addr_t rsp_buffer_token;
1886 dma_addr_t buffer_token;
1887 size_t rsp_buffer_size;
1888 union ibmvnic_crq crq;
1889 unsigned long flags;
1890 size_t buffer_size;
1891 __be64 *tx_list_p;
1892 __be64 *rx_list_p;
1893 int i;
1894
1895 buffer_size =
1896 sizeof(struct ibmvnic_login_buffer) +
1897 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1898
1899 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1900 if (!login_buffer)
1901 goto buf_alloc_failed;
1902
1903 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1904 DMA_TO_DEVICE);
1905 if (dma_mapping_error(dev, buffer_token)) {
1906 dev_err(dev, "Couldn't map login buffer\n");
1907 goto buf_map_failed;
1908 }
1909
John Allen498cd8e2016-04-06 11:49:55 -05001910 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1911 sizeof(u64) * adapter->req_tx_queues +
1912 sizeof(u64) * adapter->req_rx_queues +
1913 sizeof(u64) * adapter->req_rx_queues +
1914 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001915
1916 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1917 if (!login_rsp_buffer)
1918 goto buf_rsp_alloc_failed;
1919
1920 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1921 rsp_buffer_size, DMA_FROM_DEVICE);
1922 if (dma_mapping_error(dev, rsp_buffer_token)) {
1923 dev_err(dev, "Couldn't map login rsp buffer\n");
1924 goto buf_rsp_map_failed;
1925 }
1926 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1927 if (!inflight_cmd) {
1928 dev_err(dev, "Couldn't allocate inflight_cmd\n");
1929 goto inflight_alloc_failed;
1930 }
1931 adapter->login_buf = login_buffer;
1932 adapter->login_buf_token = buffer_token;
1933 adapter->login_buf_sz = buffer_size;
1934 adapter->login_rsp_buf = login_rsp_buffer;
1935 adapter->login_rsp_buf_token = rsp_buffer_token;
1936 adapter->login_rsp_buf_sz = rsp_buffer_size;
1937
1938 login_buffer->len = cpu_to_be32(buffer_size);
1939 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1940 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1941 login_buffer->off_txcomp_subcrqs =
1942 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1943 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1944 login_buffer->off_rxcomp_subcrqs =
1945 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1946 sizeof(u64) * adapter->req_tx_queues);
1947 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1948 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1949
1950 tx_list_p = (__be64 *)((char *)login_buffer +
1951 sizeof(struct ibmvnic_login_buffer));
1952 rx_list_p = (__be64 *)((char *)login_buffer +
1953 sizeof(struct ibmvnic_login_buffer) +
1954 sizeof(u64) * adapter->req_tx_queues);
1955
1956 for (i = 0; i < adapter->req_tx_queues; i++) {
1957 if (adapter->tx_scrq[i]) {
1958 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1959 crq_num);
1960 }
1961 }
1962
1963 for (i = 0; i < adapter->req_rx_queues; i++) {
1964 if (adapter->rx_scrq[i]) {
1965 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1966 crq_num);
1967 }
1968 }
1969
1970 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1971 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1972 netdev_dbg(adapter->netdev, "%016lx\n",
1973 ((unsigned long int *)(adapter->login_buf))[i]);
1974 }
1975
1976 memset(&crq, 0, sizeof(crq));
1977 crq.login.first = IBMVNIC_CRQ_CMD;
1978 crq.login.cmd = LOGIN;
1979 crq.login.ioba = cpu_to_be32(buffer_token);
1980 crq.login.len = cpu_to_be32(buffer_size);
1981
1982 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1983
1984 spin_lock_irqsave(&adapter->inflight_lock, flags);
1985 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1986 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1987
1988 ibmvnic_send_crq(adapter, &crq);
1989
1990 return;
1991
1992inflight_alloc_failed:
1993 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1994 DMA_FROM_DEVICE);
1995buf_rsp_map_failed:
1996 kfree(login_rsp_buffer);
1997buf_rsp_alloc_failed:
1998 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1999buf_map_failed:
2000 kfree(login_buffer);
2001buf_alloc_failed:
2002 return;
2003}
2004
2005static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2006 u32 len, u8 map_id)
2007{
2008 union ibmvnic_crq crq;
2009
2010 memset(&crq, 0, sizeof(crq));
2011 crq.request_map.first = IBMVNIC_CRQ_CMD;
2012 crq.request_map.cmd = REQUEST_MAP;
2013 crq.request_map.map_id = map_id;
2014 crq.request_map.ioba = cpu_to_be32(addr);
2015 crq.request_map.len = cpu_to_be32(len);
2016 ibmvnic_send_crq(adapter, &crq);
2017}
2018
2019static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2020{
2021 union ibmvnic_crq crq;
2022
2023 memset(&crq, 0, sizeof(crq));
2024 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2025 crq.request_unmap.cmd = REQUEST_UNMAP;
2026 crq.request_unmap.map_id = map_id;
2027 ibmvnic_send_crq(adapter, &crq);
2028}
2029
2030static void send_map_query(struct ibmvnic_adapter *adapter)
2031{
2032 union ibmvnic_crq crq;
2033
2034 memset(&crq, 0, sizeof(crq));
2035 crq.query_map.first = IBMVNIC_CRQ_CMD;
2036 crq.query_map.cmd = QUERY_MAP;
2037 ibmvnic_send_crq(adapter, &crq);
2038}
2039
2040/* Send a series of CRQs requesting various capabilities of the VNIC server */
2041static void send_cap_queries(struct ibmvnic_adapter *adapter)
2042{
2043 union ibmvnic_crq crq;
2044
Thomas Falcon901e0402017-02-15 12:17:59 -06002045 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002046 memset(&crq, 0, sizeof(crq));
2047 crq.query_capability.first = IBMVNIC_CRQ_CMD;
2048 crq.query_capability.cmd = QUERY_CAPABILITY;
2049
2050 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002051 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002052 ibmvnic_send_crq(adapter, &crq);
2053
2054 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002055 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002056 ibmvnic_send_crq(adapter, &crq);
2057
2058 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002059 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002060 ibmvnic_send_crq(adapter, &crq);
2061
2062 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002063 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002064 ibmvnic_send_crq(adapter, &crq);
2065
2066 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002067 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002068 ibmvnic_send_crq(adapter, &crq);
2069
2070 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002071 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002072 ibmvnic_send_crq(adapter, &crq);
2073
2074 crq.query_capability.capability =
2075 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002076 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002077 ibmvnic_send_crq(adapter, &crq);
2078
2079 crq.query_capability.capability =
2080 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002081 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002082 ibmvnic_send_crq(adapter, &crq);
2083
2084 crq.query_capability.capability =
2085 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002086 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002087 ibmvnic_send_crq(adapter, &crq);
2088
2089 crq.query_capability.capability =
2090 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002091 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002092 ibmvnic_send_crq(adapter, &crq);
2093
2094 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06002095 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002096 ibmvnic_send_crq(adapter, &crq);
2097
2098 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06002099 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002100 ibmvnic_send_crq(adapter, &crq);
2101
2102 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06002103 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002104 ibmvnic_send_crq(adapter, &crq);
2105
2106 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06002107 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002108 ibmvnic_send_crq(adapter, &crq);
2109
2110 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06002111 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002112 ibmvnic_send_crq(adapter, &crq);
2113
2114 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06002115 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002116 ibmvnic_send_crq(adapter, &crq);
2117
2118 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002119 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002120 ibmvnic_send_crq(adapter, &crq);
2121
2122 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06002123 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002124 ibmvnic_send_crq(adapter, &crq);
2125
2126 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002127 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002128 ibmvnic_send_crq(adapter, &crq);
2129
2130 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06002131 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002132 ibmvnic_send_crq(adapter, &crq);
2133
2134 crq.query_capability.capability =
2135 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06002136 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002137 ibmvnic_send_crq(adapter, &crq);
2138
2139 crq.query_capability.capability =
2140 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002141 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002142 ibmvnic_send_crq(adapter, &crq);
2143
2144 crq.query_capability.capability =
2145 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002146 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002147 ibmvnic_send_crq(adapter, &crq);
2148
2149 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06002150 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002151 ibmvnic_send_crq(adapter, &crq);
2152}
2153
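/* Process the IP offload capabilities returned by the server, update the
 * netdev checksum features accordingly, and send the CONTROL_IP_OFFLOAD
 * request that enables the selected offloads.
 */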
2154static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2155{
2156 struct device *dev = &adapter->vdev->dev;
2157 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2158 union ibmvnic_crq crq;
2159 int i;
2160
2161 dma_unmap_single(dev, adapter->ip_offload_tok,
2162 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2163
2164 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2165 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2166 netdev_dbg(adapter->netdev, "%016lx\n",
2167 ((unsigned long int *)(buf))[i]);
2168
2169 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2170 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2171 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2172 buf->tcp_ipv4_chksum);
2173 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2174 buf->tcp_ipv6_chksum);
2175 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2176 buf->udp_ipv4_chksum);
2177 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2178 buf->udp_ipv6_chksum);
2179 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2180 buf->large_tx_ipv4);
2181 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2182 buf->large_tx_ipv6);
2183 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2184 buf->large_rx_ipv4);
2185 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2186 buf->large_rx_ipv6);
2187 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2188 buf->max_ipv4_header_size);
2189 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2190 buf->max_ipv6_header_size);
2191 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2192 buf->max_tcp_header_size);
2193 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2194 buf->max_udp_header_size);
2195 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2196 buf->max_large_tx_size);
2197 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2198 buf->max_large_rx_size);
2199 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2200 buf->ipv6_extension_header);
2201 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2202 buf->tcp_pseudosum_req);
2203 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2204 buf->num_ipv6_ext_headers);
2205 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2206 buf->off_ipv6_ext_headers);
2207
2208 adapter->ip_offload_ctrl_tok =
2209 dma_map_single(dev, &adapter->ip_offload_ctrl,
2210 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2211
2212 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2213 dev_err(dev, "Couldn't map ip offload control buffer\n");
2214 return;
2215 }
2216
2217 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2218 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2219 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2220 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2221 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2222
2223 /* large_tx/rx disabled for now, additional features needed */
2224 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2225 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2226 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2227 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2228
2229 adapter->netdev->features = NETIF_F_GSO;
2230
2231 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2232 adapter->netdev->features |= NETIF_F_IP_CSUM;
2233
2234 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2235 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2236
Thomas Falcon9be02cd2016-04-01 17:20:35 -05002237 if ((adapter->netdev->features &
2238 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2239 adapter->netdev->features |= NETIF_F_RXCSUM;
2240
Thomas Falcon032c5e82015-12-21 11:26:06 -06002241 memset(&crq, 0, sizeof(crq));
2242 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2243 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2244 crq.control_ip_offload.len =
2245 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2246 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2247 ibmvnic_send_crq(adapter, &crq);
2248}
2249
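/* Print the detailed error data returned for a previously reported error
 * id and release the associated error buffer.
 */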
2250static void handle_error_info_rsp(union ibmvnic_crq *crq,
2251 struct ibmvnic_adapter *adapter)
2252{
2253 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08002254 struct ibmvnic_error_buff *error_buff, *tmp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002255 unsigned long flags;
2256 bool found = false;
2257 int i;
2258
	if (crq->request_error_rsp.rc.code) {
2260 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2261 crq->request_error_rsp.rc.code);
2262 return;
2263 }
2264
2265 spin_lock_irqsave(&adapter->error_list_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08002266 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002267 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2268 found = true;
2269 list_del(&error_buff->list);
2270 break;
2271 }
2272 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2273
2274 if (!found) {
2275 dev_err(dev, "Couldn't find error id %x\n",
Thomas Falcon75224c92017-02-15 10:33:33 -06002276 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002277 return;
2278 }
2279
2280 dev_err(dev, "Detailed info for error id %x:",
Thomas Falcon75224c92017-02-15 10:33:33 -06002281 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002282
2283 for (i = 0; i < error_buff->len; i++) {
2284 pr_cont("%02x", (int)error_buff->buff[i]);
2285 if (i % 8 == 7)
2286 pr_cont(" ");
2287 }
2288 pr_cont("\n");
2289
2290 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2291 DMA_FROM_DEVICE);
2292 kfree(error_buff->buff);
2293 kfree(error_buff);
2294}
2295
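/* Allocate and map a dump buffer of the size reported by firmware, track
 * the command on the in-flight list, and send a REQUEST_DUMP CRQ asking
 * for the dump to be written into it.
 */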
2296static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2297 struct ibmvnic_adapter *adapter)
2298{
2299 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2300 struct ibmvnic_inflight_cmd *inflight_cmd;
2301 struct device *dev = &adapter->vdev->dev;
2302 union ibmvnic_crq newcrq;
2303 unsigned long flags;
2304
2305 /* allocate and map buffer */
2306 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2307 if (!adapter->dump_data) {
2308 complete(&adapter->fw_done);
2309 return;
2310 }
2311
2312 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2313 DMA_FROM_DEVICE);
2314
2315 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2316 if (!firmware_has_feature(FW_FEATURE_CMO))
2317 dev_err(dev, "Couldn't map dump data\n");
2318 kfree(adapter->dump_data);
2319 complete(&adapter->fw_done);
2320 return;
2321 }
2322
2323 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2324 if (!inflight_cmd) {
2325 dma_unmap_single(dev, adapter->dump_data_token, len,
2326 DMA_FROM_DEVICE);
2327 kfree(adapter->dump_data);
2328 complete(&adapter->fw_done);
2329 return;
2330 }
2331
2332 memset(&newcrq, 0, sizeof(newcrq));
2333 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2334 newcrq.request_dump.cmd = REQUEST_DUMP;
2335 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2336 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2337
2338 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2339
2340 spin_lock_irqsave(&adapter->inflight_lock, flags);
2341 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2342 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2343
2344 ibmvnic_send_crq(adapter, &newcrq);
2345}
2346
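/* Handle an unsolicited error report from firmware: log it, allocate and
 * map a buffer for the detailed error data, and request that data with a
 * REQUEST_ERROR_INFO CRQ.
 */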
2347static void handle_error_indication(union ibmvnic_crq *crq,
2348 struct ibmvnic_adapter *adapter)
2349{
2350 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2351 struct ibmvnic_inflight_cmd *inflight_cmd;
2352 struct device *dev = &adapter->vdev->dev;
2353 struct ibmvnic_error_buff *error_buff;
2354 union ibmvnic_crq new_crq;
2355 unsigned long flags;
2356
2357 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2358 crq->error_indication.
2359 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
Thomas Falcon75224c92017-02-15 10:33:33 -06002360 be32_to_cpu(crq->error_indication.error_id),
2361 be16_to_cpu(crq->error_indication.error_cause));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002362
2363 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2364 if (!error_buff)
2365 return;
2366
2367 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2368 if (!error_buff->buff) {
2369 kfree(error_buff);
2370 return;
2371 }
2372
2373 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2374 DMA_FROM_DEVICE);
2375 if (dma_mapping_error(dev, error_buff->dma)) {
2376 if (!firmware_has_feature(FW_FEATURE_CMO))
2377 dev_err(dev, "Couldn't map error buffer\n");
2378 kfree(error_buff->buff);
2379 kfree(error_buff);
2380 return;
2381 }
2382
2383 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2384 if (!inflight_cmd) {
2385 dma_unmap_single(dev, error_buff->dma, detail_len,
2386 DMA_FROM_DEVICE);
2387 kfree(error_buff->buff);
2388 kfree(error_buff);
2389 return;
2390 }
2391
2392 error_buff->len = detail_len;
2393 error_buff->error_id = crq->error_indication.error_id;
2394
2395 spin_lock_irqsave(&adapter->error_list_lock, flags);
2396 list_add_tail(&error_buff->list, &adapter->errors);
2397 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2398
2399 memset(&new_crq, 0, sizeof(new_crq));
2400 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2401 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2402 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2403 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2404 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2405
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2407
2408 spin_lock_irqsave(&adapter->inflight_lock, flags);
2409 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2410 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2411
2412 ibmvnic_send_crq(adapter, &new_crq);
2413}
2414
2415static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2416 struct ibmvnic_adapter *adapter)
2417{
2418 struct net_device *netdev = adapter->netdev;
2419 struct device *dev = &adapter->vdev->dev;
2420 long rc;
2421
2422 rc = crq->change_mac_addr_rsp.rc.code;
2423 if (rc) {
2424 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2425 return;
2426 }
2427 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2428 ETH_ALEN);
2429}
2430
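/* Process the response to a REQUEST_CAPABILITY CRQ. On partial success the
 * requested value is lowered to what the server granted and the sub-CRQs
 * are renegotiated; once all responses are in, query IP offload support.
 */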
2431static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2432 struct ibmvnic_adapter *adapter)
2433{
2434 struct device *dev = &adapter->vdev->dev;
2435 u64 *req_value;
2436 char *name;
2437
Thomas Falcon901e0402017-02-15 12:17:59 -06002438 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002439 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2440 case REQ_TX_QUEUES:
2441 req_value = &adapter->req_tx_queues;
2442 name = "tx";
2443 break;
2444 case REQ_RX_QUEUES:
2445 req_value = &adapter->req_rx_queues;
2446 name = "rx";
2447 break;
2448 case REQ_RX_ADD_QUEUES:
2449 req_value = &adapter->req_rx_add_queues;
2450 name = "rx_add";
2451 break;
2452 case REQ_TX_ENTRIES_PER_SUBCRQ:
2453 req_value = &adapter->req_tx_entries_per_subcrq;
2454 name = "tx_entries_per_subcrq";
2455 break;
2456 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2457 req_value = &adapter->req_rx_add_entries_per_subcrq;
2458 name = "rx_add_entries_per_subcrq";
2459 break;
2460 case REQ_MTU:
2461 req_value = &adapter->req_mtu;
2462 name = "mtu";
2463 break;
2464 case PROMISC_REQUESTED:
2465 req_value = &adapter->promisc;
2466 name = "promisc";
2467 break;
2468 default:
2469 dev_err(dev, "Got invalid cap request rsp %d\n",
2470 crq->request_capability.capability);
2471 return;
2472 }
2473
2474 switch (crq->request_capability_rsp.rc.code) {
2475 case SUCCESS:
2476 break;
2477 case PARTIALSUCCESS:
2478 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2479 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06002480 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06002481 number), name);
Thomas Falconea22d512016-07-06 15:35:17 -05002482 release_sub_crqs_no_irqs(adapter);
Thomas Falcon28f4d162017-02-15 10:32:11 -06002483 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
Thomas Falconea22d512016-07-06 15:35:17 -05002484 init_sub_crqs(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002485 return;
2486 default:
2487 dev_err(dev, "Error %d in request cap rsp\n",
2488 crq->request_capability_rsp.rc.code);
2489 return;
2490 }
2491
2492 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06002493 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002494 union ibmvnic_crq newcrq;
2495 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2496 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2497 &adapter->ip_offload_buf;
2498
Thomas Falcon249168a2017-02-15 12:18:00 -06002499 adapter->wait_capability = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002500 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2501 buf_sz,
2502 DMA_FROM_DEVICE);
2503
2504 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2505 if (!firmware_has_feature(FW_FEATURE_CMO))
2506 dev_err(dev, "Couldn't map offload buffer\n");
2507 return;
2508 }
2509
2510 memset(&newcrq, 0, sizeof(newcrq));
2511 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2512 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2513 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2514 newcrq.query_ip_offload.ioba =
2515 cpu_to_be32(adapter->ip_offload_tok);
2516
2517 ibmvnic_send_crq(adapter, &newcrq);
2518 }
2519}
2520
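/* Validate the login response against the login request. A non-zero return
 * code triggers renegotiation with fewer queues; on success the RAS
 * component count is requested from the server.
 */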
2521static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2522 struct ibmvnic_adapter *adapter)
2523{
2524 struct device *dev = &adapter->vdev->dev;
2525 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2526 struct ibmvnic_login_buffer *login = adapter->login_buf;
2527 union ibmvnic_crq crq;
2528 int i;
2529
2530 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2531 DMA_BIDIRECTIONAL);
2532 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2533 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2534
John Allen498cd8e2016-04-06 11:49:55 -05002535 /* If the number of queues requested can't be allocated by the
2536 * server, the login response will return with code 1. We will need
2537 * to resend the login buffer with fewer queues requested.
2538 */
2539 if (login_rsp_crq->generic.rc.code) {
2540 adapter->renegotiate = true;
2541 complete(&adapter->init_done);
2542 return 0;
2543 }
2544
Thomas Falcon032c5e82015-12-21 11:26:06 -06002545 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2546 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2547 netdev_dbg(adapter->netdev, "%016lx\n",
2548 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2549 }
2550
2551 /* Sanity checks */
2552 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2553 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2554 adapter->req_rx_add_queues !=
2555 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2556 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2557 ibmvnic_remove(adapter->vdev);
2558 return -EIO;
2559 }
2560 complete(&adapter->init_done);
2561
2562 memset(&crq, 0, sizeof(crq));
2563 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2564 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2565 ibmvnic_send_crq(adapter, &crq);
2566
2567 return 0;
2568}
2569
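/* Process the response to a REQUEST_MAP CRQ. On failure, clear the map id
 * from the tx/rx pool long term buffers so it can be reused, then signal
 * completion of the firmware operation.
 */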
2570static void handle_request_map_rsp(union ibmvnic_crq *crq,
2571 struct ibmvnic_adapter *adapter)
2572{
2573 struct device *dev = &adapter->vdev->dev;
2574 u8 map_id = crq->request_map_rsp.map_id;
2575 int tx_subcrqs;
2576 int rx_subcrqs;
2577 long rc;
2578 int i;
2579
2580 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2581 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2582
2583 rc = crq->request_map_rsp.rc.code;
2584 if (rc) {
2585 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2586 adapter->map_id--;
2587 /* need to find and zero tx/rx_pool map_id */
2588 for (i = 0; i < tx_subcrqs; i++) {
2589 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2590 adapter->tx_pool[i].long_term_buff.map_id = 0;
2591 }
2592 for (i = 0; i < rx_subcrqs; i++) {
2593 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2594 adapter->rx_pool[i].long_term_buff.map_id = 0;
2595 }
2596 }
2597 complete(&adapter->fw_done);
2598}
2599
2600static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2601 struct ibmvnic_adapter *adapter)
2602{
2603 struct device *dev = &adapter->vdev->dev;
2604 long rc;
2605
2606 rc = crq->request_unmap_rsp.rc.code;
2607 if (rc)
2608 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2609}
2610
2611static void handle_query_map_rsp(union ibmvnic_crq *crq,
2612 struct ibmvnic_adapter *adapter)
2613{
2614 struct net_device *netdev = adapter->netdev;
2615 struct device *dev = &adapter->vdev->dev;
2616 long rc;
2617
2618 rc = crq->query_map_rsp.rc.code;
2619 if (rc) {
2620 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2621 return;
2622 }
2623 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2624 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2625 crq->query_map_rsp.free_pages);
2626}
2627
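/* Record one queried capability value reported by the VNIC server. When
 * the last outstanding query response arrives, begin sub-CRQ
 * initialization.
 */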
2628static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2629 struct ibmvnic_adapter *adapter)
2630{
2631 struct net_device *netdev = adapter->netdev;
2632 struct device *dev = &adapter->vdev->dev;
2633 long rc;
2634
Thomas Falcon901e0402017-02-15 12:17:59 -06002635 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002636 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06002637 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002638 rc = crq->query_capability.rc.code;
2639 if (rc) {
2640 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2641 goto out;
2642 }
2643
2644 switch (be16_to_cpu(crq->query_capability.capability)) {
2645 case MIN_TX_QUEUES:
2646 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002647 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002648 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2649 adapter->min_tx_queues);
2650 break;
2651 case MIN_RX_QUEUES:
2652 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002653 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002654 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2655 adapter->min_rx_queues);
2656 break;
2657 case MIN_RX_ADD_QUEUES:
2658 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002659 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002660 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2661 adapter->min_rx_add_queues);
2662 break;
2663 case MAX_TX_QUEUES:
2664 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002665 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002666 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2667 adapter->max_tx_queues);
2668 break;
2669 case MAX_RX_QUEUES:
2670 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002671 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002672 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2673 adapter->max_rx_queues);
2674 break;
2675 case MAX_RX_ADD_QUEUES:
2676 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002677 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002678 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2679 adapter->max_rx_add_queues);
2680 break;
2681 case MIN_TX_ENTRIES_PER_SUBCRQ:
2682 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002683 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002684 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2685 adapter->min_tx_entries_per_subcrq);
2686 break;
2687 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2688 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002689 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002690 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2691 adapter->min_rx_add_entries_per_subcrq);
2692 break;
2693 case MAX_TX_ENTRIES_PER_SUBCRQ:
2694 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002695 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002696 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2697 adapter->max_tx_entries_per_subcrq);
2698 break;
2699 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2700 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002701 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002702 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2703 adapter->max_rx_add_entries_per_subcrq);
2704 break;
2705 case TCP_IP_OFFLOAD:
2706 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06002707 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002708 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2709 adapter->tcp_ip_offload);
2710 break;
2711 case PROMISC_SUPPORTED:
2712 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002713 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002714 netdev_dbg(netdev, "promisc_supported = %lld\n",
2715 adapter->promisc_supported);
2716 break;
2717 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002718 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06002719 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002720 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2721 break;
2722 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002723 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06002724 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002725 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2726 break;
2727 case MAX_MULTICAST_FILTERS:
2728 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06002729 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002730 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2731 adapter->max_multicast_filters);
2732 break;
2733 case VLAN_HEADER_INSERTION:
2734 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06002735 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002736 if (adapter->vlan_header_insertion)
2737 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2738 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2739 adapter->vlan_header_insertion);
2740 break;
2741 case MAX_TX_SG_ENTRIES:
2742 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06002743 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002744 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2745 adapter->max_tx_sg_entries);
2746 break;
2747 case RX_SG_SUPPORTED:
2748 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002749 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002750 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2751 adapter->rx_sg_supported);
2752 break;
2753 case OPT_TX_COMP_SUB_QUEUES:
2754 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002755 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002756 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2757 adapter->opt_tx_comp_sub_queues);
2758 break;
2759 case OPT_RX_COMP_QUEUES:
2760 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002761 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002762 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2763 adapter->opt_rx_comp_queues);
2764 break;
2765 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2766 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06002767 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002768 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2769 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2770 break;
2771 case OPT_TX_ENTRIES_PER_SUBCRQ:
2772 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002773 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002774 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2775 adapter->opt_tx_entries_per_subcrq);
2776 break;
2777 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2778 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002779 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002780 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2781 adapter->opt_rxba_entries_per_subcrq);
2782 break;
2783 case TX_RX_DESC_REQ:
2784 adapter->tx_rx_desc_req = crq->query_capability.number;
2785 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2786 adapter->tx_rx_desc_req);
2787 break;
2788
2789 default:
2790 netdev_err(netdev, "Got invalid cap rsp %d\n",
2791 crq->query_capability.capability);
2792 }
2793
2794out:
Thomas Falcon249168a2017-02-15 12:18:00 -06002795 if (atomic_read(&adapter->running_cap_crqs) == 0) {
		/* We're done querying the capabilities, initialize sub-crqs */
		adapter->wait_capability = false;
		init_sub_crqs(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06002799 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002800}
2801
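/* Update the cached RAS component state (trace level, pause state, trace
 * buffer size, etc.) to reflect a successful CONTROL_RAS command.
 */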
2802static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2803 struct ibmvnic_adapter *adapter)
2804{
2805 u8 correlator = crq->control_ras_rsp.correlator;
2806 struct device *dev = &adapter->vdev->dev;
2807 bool found = false;
2808 int i;
2809
2810 if (crq->control_ras_rsp.rc.code) {
2811 dev_warn(dev, "Control ras failed rc=%d\n",
2812 crq->control_ras_rsp.rc.code);
2813 return;
2814 }
2815
2816 for (i = 0; i < adapter->ras_comp_num; i++) {
2817 if (adapter->ras_comps[i].correlator == correlator) {
2818 found = true;
2819 break;
2820 }
2821 }
2822
2823 if (!found) {
2824 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2825 return;
2826 }
2827
2828 switch (crq->control_ras_rsp.op) {
2829 case IBMVNIC_TRACE_LEVEL:
2830 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2831 break;
2832 case IBMVNIC_ERROR_LEVEL:
2833 adapter->ras_comps[i].error_check_level =
2834 crq->control_ras.level;
2835 break;
2836 case IBMVNIC_TRACE_PAUSE:
2837 adapter->ras_comp_int[i].paused = 1;
2838 break;
2839 case IBMVNIC_TRACE_RESUME:
2840 adapter->ras_comp_int[i].paused = 0;
2841 break;
2842 case IBMVNIC_TRACE_ON:
2843 adapter->ras_comps[i].trace_on = 1;
2844 break;
2845 case IBMVNIC_TRACE_OFF:
2846 adapter->ras_comps[i].trace_on = 0;
2847 break;
2848 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2849 /* trace_buff_sz is 3 bytes, stuff it into an int */
2850 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2851 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2852 crq->control_ras_rsp.trace_buff_sz[0];
2853 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2854 crq->control_ras_rsp.trace_buff_sz[1];
2855 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2856 crq->control_ras_rsp.trace_buff_sz[2];
2857 break;
2858 default:
2859 dev_err(dev, "invalid op %d on control_ras_rsp",
2860 crq->control_ras_rsp.op);
2861 }
2862}
2863
Thomas Falcon032c5e82015-12-21 11:26:06 -06002864static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2865 loff_t *ppos)
2866{
2867 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2868 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2869 struct device *dev = &adapter->vdev->dev;
2870 struct ibmvnic_fw_trace_entry *trace;
2871 int num = ras_comp_int->num;
2872 union ibmvnic_crq crq;
2873 dma_addr_t trace_tok;
2874
2875 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2876 return 0;
2877
2878 trace =
2879 dma_alloc_coherent(dev,
2880 be32_to_cpu(adapter->ras_comps[num].
2881 trace_buff_size), &trace_tok,
2882 GFP_KERNEL);
2883 if (!trace) {
2884 dev_err(dev, "Couldn't alloc trace buffer\n");
2885 return 0;
2886 }
2887
2888 memset(&crq, 0, sizeof(crq));
2889 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2890 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2891 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2892 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2893 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002894
2895 init_completion(&adapter->fw_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05002896 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002897 wait_for_completion(&adapter->fw_done);
2898
2899 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2900 len =
2901 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2902 *ppos;
2903
2904 copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2905
2906 dma_free_coherent(dev,
2907 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2908 trace, trace_tok);
2909 *ppos += len;
2910 return len;
2911}
2912
2913static const struct file_operations trace_ops = {
2914 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002915 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002916 .read = trace_read,
2917};
2918
2919static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2920 loff_t *ppos)
2921{
2922 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2923 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2924 int num = ras_comp_int->num;
2925 char buff[5]; /* 1 or 0 plus \n and \0 */
2926 int size;
2927
2928 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2929
	return simple_read_from_buffer(user_buf, len, ppos, buff, size);
2936}
2937
2938static ssize_t paused_write(struct file *file, const char __user *user_buf,
2939 size_t len, loff_t *ppos)
2940{
2941 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2942 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2943 int num = ras_comp_int->num;
2944 union ibmvnic_crq crq;
2945 unsigned long val;
2946 char buff[9]; /* decimal max int plus \n and \0 */
2947
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2950
2951 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2952
2953 memset(&crq, 0, sizeof(crq));
2954 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2955 crq.control_ras.cmd = CONTROL_RAS;
2956 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2957 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2958 ibmvnic_send_crq(adapter, &crq);
2959
2960 return len;
2961}
2962
2963static const struct file_operations paused_ops = {
2964 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002965 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002966 .read = paused_read,
2967 .write = paused_write,
2968};
2969
2970static ssize_t tracing_read(struct file *file, char __user *user_buf,
2971 size_t len, loff_t *ppos)
2972{
2973 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2974 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2975 int num = ras_comp_int->num;
2976 char buff[5]; /* 1 or 0 plus \n and \0 */
2977 int size;
2978
2979 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2980
	return simple_read_from_buffer(user_buf, len, ppos, buff, size);
2987}
2988
2989static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2990 size_t len, loff_t *ppos)
2991{
2992 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2993 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2994 int num = ras_comp_int->num;
2995 union ibmvnic_crq crq;
2996 unsigned long val;
2997 char buff[9]; /* decimal max int plus \n and \0 */
2998
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3001
3002 memset(&crq, 0, sizeof(crq));
3003 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3004 crq.control_ras.cmd = CONTROL_RAS;
3005 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
3006 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
3009}
3010
3011static const struct file_operations tracing_ops = {
3012 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003013 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003014 .read = tracing_read,
3015 .write = tracing_write,
3016};
3017
3018static ssize_t error_level_read(struct file *file, char __user *user_buf,
3019 size_t len, loff_t *ppos)
3020{
3021 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3022 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3023 int num = ras_comp_int->num;
3024 char buff[5]; /* decimal max char plus \n and \0 */
3025 int size;
3026
3027 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
3028
	return simple_read_from_buffer(user_buf, len, ppos, buff, size);
3035}
3036
3037static ssize_t error_level_write(struct file *file, const char __user *user_buf,
3038 size_t len, loff_t *ppos)
3039{
3040 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3041 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3042 int num = ras_comp_int->num;
3043 union ibmvnic_crq crq;
3044 unsigned long val;
3045 char buff[9]; /* decimal max int plus \n and \0 */
3046
	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3049
3050 if (val > 9)
3051 val = 9;
3052
3053 memset(&crq, 0, sizeof(crq));
3054 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3055 crq.control_ras.cmd = CONTROL_RAS;
3056 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
3057 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
3058 crq.control_ras.level = val;
3059 ibmvnic_send_crq(adapter, &crq);
3060
3061 return len;
3062}
3063
3064static const struct file_operations error_level_ops = {
3065 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003066 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003067 .read = error_level_read,
3068 .write = error_level_write,
3069};
3070
3071static ssize_t trace_level_read(struct file *file, char __user *user_buf,
3072 size_t len, loff_t *ppos)
3073{
3074 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3075 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3076 int num = ras_comp_int->num;
3077 char buff[5]; /* decimal max char plus \n and \0 */
3078 int size;
3079
3080 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
	return simple_read_from_buffer(user_buf, len, ppos, buff, size);
3087}
3088
3089static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3090 size_t len, loff_t *ppos)
3091{
3092 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3093 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3094 union ibmvnic_crq crq;
3095 unsigned long val;

	if (kstrtoul_from_user(user_buf, len, 10, &val))
		return -EINVAL;
3100 if (val > 9)
3101 val = 9;
3102
3103 memset(&crq, 0, sizeof(crq));
3104 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3105 crq.control_ras.cmd = CONTROL_RAS;
3106 crq.control_ras.correlator =
3107 adapter->ras_comps[ras_comp_int->num].correlator;
3108 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3109 crq.control_ras.level = val;
3110 ibmvnic_send_crq(adapter, &crq);
3111
3112 return len;
3113}
3114
3115static const struct file_operations trace_level_ops = {
3116 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003117 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003118 .read = trace_level_read,
3119 .write = trace_level_write,
3120};
3121
3122static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3123 size_t len, loff_t *ppos)
3124{
3125 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3126 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3127 int num = ras_comp_int->num;
	char buff[12]; /* decimal max int plus \n and \0 */
3129 int size;
3130
3131 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
	return simple_read_from_buffer(user_buf, len, ppos, buff, size);
3138}
3139
3140static ssize_t trace_buff_size_write(struct file *file,
3141 const char __user *user_buf, size_t len,
3142 loff_t *ppos)
3143{
3144 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3145 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3146 union ibmvnic_crq crq;
3147 unsigned long val;

	if (kstrtoul_from_user(user_buf, len, 10, &val))
		return -EINVAL;
3152
3153 memset(&crq, 0, sizeof(crq));
3154 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3155 crq.control_ras.cmd = CONTROL_RAS;
3156 crq.control_ras.correlator =
3157 adapter->ras_comps[ras_comp_int->num].correlator;
3158 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes; copy the last three bytes of val, which
	 * on a big-endian unsigned long are the low-order bytes
	 */
3160 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3161 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3162 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3163 ibmvnic_send_crq(adapter, &crq);
3164
3165 return len;
3166}
3167
3168static const struct file_operations trace_size_ops = {
3169 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003170 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003171 .read = trace_buff_size_read,
3172 .write = trace_buff_size_write,
3173};
3174
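/* Build the debugfs tree for the RAS components reported by firmware.
 * Each component gets its own directory under
 * ibmvnic_<unit-address>/ras_comps/<component-name>/ with description,
 * trace_buf_size, trace_level, error_level, tracing, paused and trace
 * entries.
 *
 * As a rough sketch of the intended use from userspace (assuming debugfs
 * is mounted at /sys/kernel/debug and a component named "comp"):
 *
 *   echo 1 > /sys/kernel/debug/ibmvnic_XXXX/ras_comps/comp/tracing
 *   cat /sys/kernel/debug/ibmvnic_XXXX/ras_comps/comp/trace_level
 */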
3175static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3176 struct ibmvnic_adapter *adapter)
3177{
3178 struct device *dev = &adapter->vdev->dev;
3179 struct dentry *dir_ent;
3180 struct dentry *ent;
3181 int i;
3182
3183 debugfs_remove_recursive(adapter->ras_comps_ent);
3184
3185 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3186 adapter->debugfs_dir);
3187 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3188 dev_info(dev, "debugfs create ras_comps dir failed\n");
3189 return;
3190 }
3191
3192 for (i = 0; i < adapter->ras_comp_num; i++) {
3193 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3194 adapter->ras_comps_ent);
3195 if (!dir_ent || IS_ERR(dir_ent)) {
3196 dev_info(dev, "debugfs create %s dir failed\n",
3197 adapter->ras_comps[i].name);
3198 continue;
3199 }
3200
3201 adapter->ras_comp_int[i].adapter = adapter;
3202 adapter->ras_comp_int[i].num = i;
3203 adapter->ras_comp_int[i].desc_blob.data =
3204 &adapter->ras_comps[i].description;
3205 adapter->ras_comp_int[i].desc_blob.size =
3206 sizeof(adapter->ras_comps[i].description);
3207
		/* No need to remember the dentries because the debugfs dir
3209 * gets removed recursively
3210 */
3211 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3212 &adapter->ras_comp_int[i].desc_blob);
3213 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3214 dir_ent, &adapter->ras_comp_int[i],
3215 &trace_size_ops);
3216 ent = debugfs_create_file("trace_level",
3217 S_IRUGO |
3218 (adapter->ras_comps[i].trace_level !=
3219 0xFF ? S_IWUSR : 0),
3220 dir_ent, &adapter->ras_comp_int[i],
3221 &trace_level_ops);
3222 ent = debugfs_create_file("error_level",
3223 S_IRUGO |
3224 (adapter->
3225 ras_comps[i].error_check_level !=
3226 0xFF ? S_IWUSR : 0),
3227 dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
3229 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3230 dir_ent, &adapter->ras_comp_int[i],
3231 &tracing_ops);
3232 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3233 dir_ent, &adapter->ras_comp_int[i],
3234 &paused_ops);
3235 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3236 &adapter->ras_comp_int[i],
3237 &trace_ops);
3238 }
3239}
3240
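/* Handle the REQUEST_RAS_COMP_NUM response: allocate a DMA-coherent
 * buffer big enough for the reported number of firmware components and
 * the driver-side bookkeeping array, then send a REQUEST_RAS_COMPS CRQ
 * asking the server to fill that buffer.
 */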
3241static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3242 struct ibmvnic_adapter *adapter)
3243{
3244 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3245 struct device *dev = &adapter->vdev->dev;
3246 union ibmvnic_crq newcrq;
3247
3248 adapter->ras_comps = dma_alloc_coherent(dev, len,
3249 &adapter->ras_comps_tok,
3250 GFP_KERNEL);
3251 if (!adapter->ras_comps) {
3252 if (!firmware_has_feature(FW_FEATURE_CMO))
3253 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3254 return;
3255 }
3256
3257 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3258 sizeof(struct ibmvnic_fw_comp_internal),
3259 GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}
3263
3264 memset(&newcrq, 0, sizeof(newcrq));
3265 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3266 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3267 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3268 newcrq.request_ras_comps.len = cpu_to_be32(len);
3269 ibmvnic_send_crq(adapter, &newcrq);
3270}
3271
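/* Clean up commands that were still in flight when the CRQ went away:
 * unmap and free the login buffers, complete any waiter on a firmware
 * dump, and release queued error buffers so no memory or DMA mappings
 * are leaked.
 */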
3272static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3273{
Wei Yongjun96183182016-06-27 20:48:53 +08003274 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003275 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08003276 struct ibmvnic_error_buff *error_buff, *tmp2;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003277 unsigned long flags;
3278 unsigned long flags2;
3279
3280 spin_lock_irqsave(&adapter->inflight_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08003281 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003282 switch (inflight_cmd->crq.generic.cmd) {
3283 case LOGIN:
3284 dma_unmap_single(dev, adapter->login_buf_token,
3285 adapter->login_buf_sz,
3286 DMA_BIDIRECTIONAL);
3287 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3288 adapter->login_rsp_buf_sz,
3289 DMA_BIDIRECTIONAL);
3290 kfree(adapter->login_rsp_buf);
3291 kfree(adapter->login_buf);
3292 break;
3293 case REQUEST_DUMP:
3294 complete(&adapter->fw_done);
3295 break;
3296 case REQUEST_ERROR_INFO:
3297 spin_lock_irqsave(&adapter->error_list_lock, flags2);
Wei Yongjun96183182016-06-27 20:48:53 +08003298 list_for_each_entry_safe(error_buff, tmp2,
3299 &adapter->errors, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003300 dma_unmap_single(dev, error_buff->dma,
3301 error_buff->len,
3302 DMA_FROM_DEVICE);
3303 kfree(error_buff->buff);
3304 list_del(&error_buff->list);
3305 kfree(error_buff);
3306 }
3307 spin_unlock_irqrestore(&adapter->error_list_lock,
3308 flags2);
3309 break;
3310 }
3311 list_del(&inflight_cmd->list);
3312 kfree(inflight_cmd);
3313 }
3314 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3315}
3316
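/* Worker for transport events (partition migration or loss of the
 * connection to the VNIC server): drop in-flight commands, release the
 * sub-CRQs and, after a migration, re-enable the main CRQ and restart
 * the init handshake with the server.
 */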
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003317static void ibmvnic_xport_event(struct work_struct *work)
3318{
3319 struct ibmvnic_adapter *adapter = container_of(work,
3320 struct ibmvnic_adapter,
3321 ibmvnic_xport);
3322 struct device *dev = &adapter->vdev->dev;
3323 long rc;
3324
3325 ibmvnic_free_inflight(adapter);
3326 release_sub_crqs(adapter);
3327 if (adapter->migrated) {
3328 rc = ibmvnic_reenable_crq_queue(adapter);
3329 if (rc)
3330 dev_err(dev, "Error after enable rc=%ld\n", rc);
3331 adapter->migrated = false;
3332 rc = ibmvnic_send_crq_init(adapter);
3333 if (rc)
3334 dev_err(dev, "Error sending init rc=%ld\n", rc);
3335 }
3336}
3337
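/* Dispatch one CRQ message: initialization handshakes and transport
 * events are handled inline, while command responses are routed to the
 * matching handle_*_rsp helper or complete a waiting caller.
 */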
Thomas Falcon032c5e82015-12-21 11:26:06 -06003338static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3339 struct ibmvnic_adapter *adapter)
3340{
3341 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3342 struct net_device *netdev = adapter->netdev;
3343 struct device *dev = &adapter->vdev->dev;
3344 long rc;
3345
3346 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3347 ((unsigned long int *)crq)[0],
3348 ((unsigned long int *)crq)[1]);
3349 switch (gen_crq->first) {
3350 case IBMVNIC_CRQ_INIT_RSP:
3351 switch (gen_crq->cmd) {
3352 case IBMVNIC_CRQ_INIT:
3353 dev_info(dev, "Partner initialized\n");
3354 /* Send back a response */
3355 rc = ibmvnic_send_crq_init_complete(adapter);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003356 if (!rc)
3357 schedule_work(&adapter->vnic_crq_init);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003358 else
3359 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3360 break;
3361 case IBMVNIC_CRQ_INIT_COMPLETE:
3362 dev_info(dev, "Partner initialization complete\n");
3363 send_version_xchg(adapter);
3364 break;
3365 default:
3366 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3367 }
3368 return;
3369 case IBMVNIC_CRQ_XPORT_EVENT:
3370 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3371 dev_info(dev, "Re-enabling adapter\n");
3372 adapter->migrated = true;
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003373 schedule_work(&adapter->ibmvnic_xport);
Thomas Falcondfad09a2016-08-18 11:37:51 -05003374 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3375 dev_info(dev, "Backing device failover detected\n");
3376 netif_carrier_off(netdev);
3377 adapter->failover = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003378 } else {
3379 /* The adapter lost the connection */
3380 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3381 gen_crq->cmd);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003382 schedule_work(&adapter->ibmvnic_xport);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003383 }
3384 return;
3385 case IBMVNIC_CRQ_CMD_RSP:
3386 break;
3387 default:
3388 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3389 gen_crq->first);
3390 return;
3391 }
3392
3393 switch (gen_crq->cmd) {
3394 case VERSION_EXCHANGE_RSP:
3395 rc = crq->version_exchange_rsp.rc.code;
3396 if (rc) {
3397 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3398 break;
3399 }
3400 dev_info(dev, "Partner protocol version is %d\n",
3401 crq->version_exchange_rsp.version);
3402 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3403 ibmvnic_version)
3404 ibmvnic_version =
3405 be16_to_cpu(crq->version_exchange_rsp.version);
3406 send_cap_queries(adapter);
3407 break;
3408 case QUERY_CAPABILITY_RSP:
3409 handle_query_cap_rsp(crq, adapter);
3410 break;
3411 case QUERY_MAP_RSP:
3412 handle_query_map_rsp(crq, adapter);
3413 break;
3414 case REQUEST_MAP_RSP:
3415 handle_request_map_rsp(crq, adapter);
3416 break;
3417 case REQUEST_UNMAP_RSP:
3418 handle_request_unmap_rsp(crq, adapter);
3419 break;
3420 case REQUEST_CAPABILITY_RSP:
3421 handle_request_cap_rsp(crq, adapter);
3422 break;
3423 case LOGIN_RSP:
3424 netdev_dbg(netdev, "Got Login Response\n");
3425 handle_login_rsp(crq, adapter);
3426 break;
3427 case LOGICAL_LINK_STATE_RSP:
3428 netdev_dbg(netdev, "Got Logical Link State Response\n");
3429 adapter->logical_link_state =
3430 crq->logical_link_state_rsp.link_state;
3431 break;
3432 case LINK_STATE_INDICATION:
3433 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3434 adapter->phys_link_state =
3435 crq->link_state_indication.phys_link_state;
3436 adapter->logical_link_state =
3437 crq->link_state_indication.logical_link_state;
3438 break;
3439 case CHANGE_MAC_ADDR_RSP:
3440 netdev_dbg(netdev, "Got MAC address change Response\n");
3441 handle_change_mac_rsp(crq, adapter);
3442 break;
3443 case ERROR_INDICATION:
3444 netdev_dbg(netdev, "Got Error Indication\n");
3445 handle_error_indication(crq, adapter);
3446 break;
3447 case REQUEST_ERROR_RSP:
3448 netdev_dbg(netdev, "Got Error Detail Response\n");
3449 handle_error_info_rsp(crq, adapter);
3450 break;
3451 case REQUEST_STATISTICS_RSP:
3452 netdev_dbg(netdev, "Got Statistics Response\n");
3453 complete(&adapter->stats_done);
3454 break;
3455 case REQUEST_DUMP_SIZE_RSP:
3456 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3457 handle_dump_size_rsp(crq, adapter);
3458 break;
3459 case REQUEST_DUMP_RSP:
3460 netdev_dbg(netdev, "Got Request Dump Response\n");
3461 complete(&adapter->fw_done);
3462 break;
3463 case QUERY_IP_OFFLOAD_RSP:
3464 netdev_dbg(netdev, "Got Query IP offload Response\n");
3465 handle_query_ip_offload_rsp(adapter);
3466 break;
3467 case MULTICAST_CTRL_RSP:
3468 netdev_dbg(netdev, "Got multicast control Response\n");
3469 break;
3470 case CONTROL_IP_OFFLOAD_RSP:
3471 netdev_dbg(netdev, "Got Control IP offload Response\n");
3472 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3473 sizeof(adapter->ip_offload_ctrl),
3474 DMA_TO_DEVICE);
John Allenbd0b6722017-03-17 17:13:40 -05003475 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003476 break;
3477 case REQUEST_RAS_COMP_NUM_RSP:
3478 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3479 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3480 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3481 break;
3482 }
3483 adapter->ras_comp_num =
3484 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3485 handle_request_ras_comp_num_rsp(crq, adapter);
3486 break;
3487 case REQUEST_RAS_COMPS_RSP:
3488 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3489 handle_request_ras_comps_rsp(crq, adapter);
3490 break;
3491 case CONTROL_RAS_RSP:
3492 netdev_dbg(netdev, "Got Control RAS Response\n");
3493 handle_control_ras_rsp(crq, adapter);
3494 break;
3495 case COLLECT_FW_TRACE_RSP:
3496 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3497 complete(&adapter->fw_done);
3498 break;
3499 default:
3500 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3501 gen_crq->cmd);
3502 }
3503}
3504
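/* CRQ interrupt handler: mask VIO interrupts and defer the actual queue
 * processing to the tasklet.
 */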
3505static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3506{
3507 struct ibmvnic_adapter *adapter = instance;
Thomas Falcon6c267b32017-02-15 12:17:58 -06003508 unsigned long flags;
3509
3510 spin_lock_irqsave(&adapter->crq.lock, flags);
3511 vio_disable_interrupts(adapter->vdev);
3512 tasklet_schedule(&adapter->tasklet);
3513 spin_unlock_irqrestore(&adapter->crq.lock, flags);
3514 return IRQ_HANDLED;
3515}
3516
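/* Tasklet that drains the main CRQ. Interrupts are re-enabled and the
 * queue is polled once more before exiting to close the race with a
 * message arriving after the last poll; while capability responses are
 * still outstanding the tasklet keeps polling instead of exiting.
 */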
3517static void ibmvnic_tasklet(void *data)
3518{
3519 struct ibmvnic_adapter *adapter = data;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003520 struct ibmvnic_crq_queue *queue = &adapter->crq;
3521 struct vio_dev *vdev = adapter->vdev;
3522 union ibmvnic_crq *crq;
3523 unsigned long flags;
3524 bool done = false;
3525
3526 spin_lock_irqsave(&queue->lock, flags);
3527 vio_disable_interrupts(vdev);
3528 while (!done) {
3529 /* Pull all the valid messages off the CRQ */
3530 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3531 ibmvnic_handle_crq(crq, adapter);
3532 crq->generic.first = 0;
3533 }
3534 vio_enable_interrupts(vdev);
3535 crq = ibmvnic_next_crq(adapter);
3536 if (crq) {
3537 vio_disable_interrupts(vdev);
3538 ibmvnic_handle_crq(crq, adapter);
3539 crq->generic.first = 0;
3540 } else {
Thomas Falcon249168a2017-02-15 12:18:00 -06003541 /* remain in tasklet until all
3542 * capabilities responses are received
3543 */
3544 if (!adapter->wait_capability)
3545 done = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003546 }
3547 }
	/* if capability CRQs were sent in this tasklet, the following
3549 * tasklet must wait until all responses are received
3550 */
3551 if (atomic_read(&adapter->running_cap_crqs) != 0)
3552 adapter->wait_capability = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003553 spin_unlock_irqrestore(&queue->lock, flags);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003554}
3555
3556static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3557{
3558 struct vio_dev *vdev = adapter->vdev;
3559 int rc;
3560
3561 do {
3562 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3563 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3564
3565 if (rc)
3566 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3567
3568 return rc;
3569}
3570
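/* Reset the main CRQ: free the existing H_REG_CRQ registration, clear
 * the queue page and register it again. H_CLOSED only means the partner
 * is not ready yet, so it is reported as a warning rather than an error.
 */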
3571static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3572{
3573 struct ibmvnic_crq_queue *crq = &adapter->crq;
3574 struct device *dev = &adapter->vdev->dev;
3575 struct vio_dev *vdev = adapter->vdev;
3576 int rc;
3577
3578 /* Close the CRQ */
3579 do {
3580 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3581 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3582
3583 /* Clean out the queue */
3584 memset(crq->msgs, 0, PAGE_SIZE);
3585 crq->cur = 0;
3586
3587 /* And re-open it again */
3588 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3589 crq->msg_token, PAGE_SIZE);
3590
3591 if (rc == H_CLOSED)
3592 /* Adapter is good, but other end is not ready */
3593 dev_warn(dev, "Partner adapter not ready\n");
3594 else if (rc != 0)
3595 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3596
3597 return rc;
3598}
3599
3600static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3601{
3602 struct ibmvnic_crq_queue *crq = &adapter->crq;
3603 struct vio_dev *vdev = adapter->vdev;
3604 long rc;
3605
3606 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3607 free_irq(vdev->irq, adapter);
Thomas Falcon6c267b32017-02-15 12:17:58 -06003608 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003609 do {
3610 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3611 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3612
3613 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3614 DMA_BIDIRECTIONAL);
3615 free_page((unsigned long)crq->msgs);
3616}
3617
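/* Allocate and register the main CRQ page with the hypervisor, set up
 * the tasklet and the CRQ interrupt, and enable VIO interrupts. If the
 * old registration is still around (H_RESOURCE, e.g. after a kexec), a
 * CRQ reset is attempted instead.
 */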
3618static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3619{
3620 struct ibmvnic_crq_queue *crq = &adapter->crq;
3621 struct device *dev = &adapter->vdev->dev;
3622 struct vio_dev *vdev = adapter->vdev;
3623 int rc, retrc = -ENOMEM;
3624
3625 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3626 /* Should we allocate more than one page? */
3627
3628 if (!crq->msgs)
3629 return -ENOMEM;
3630
3631 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3632 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3633 DMA_BIDIRECTIONAL);
3634 if (dma_mapping_error(dev, crq->msg_token))
3635 goto map_failed;
3636
3637 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3638 crq->msg_token, PAGE_SIZE);
3639
3640 if (rc == H_RESOURCE)
3641 /* maybe kexecing and resource is busy. try a reset */
3642 rc = ibmvnic_reset_crq(adapter);
3643 retrc = rc;
3644
3645 if (rc == H_CLOSED) {
3646 dev_warn(dev, "Partner adapter not ready\n");
3647 } else if (rc) {
3648 dev_warn(dev, "Error %d opening adapter\n", rc);
3649 goto reg_crq_failed;
3650 }
3651
3652 retrc = 0;
3653
Thomas Falcon6c267b32017-02-15 12:17:58 -06003654 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3655 (unsigned long)adapter);
3656
Thomas Falcon032c5e82015-12-21 11:26:06 -06003657 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3658 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3659 adapter);
3660 if (rc) {
3661 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3662 vdev->irq, rc);
3663 goto req_irq_failed;
3664 }
3665
3666 rc = vio_enable_interrupts(vdev);
3667 if (rc) {
3668 dev_err(dev, "Error %d enabling interrupts\n", rc);
3669 goto req_irq_failed;
3670 }
3671
3672 crq->cur = 0;
3673 spin_lock_init(&crq->lock);
3674
3675 return retrc;
3676
3677req_irq_failed:
Thomas Falcon6c267b32017-02-15 12:17:58 -06003678 tasklet_kill(&adapter->tasklet);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003679 do {
3680 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3681 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3682reg_crq_failed:
3683 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3684map_failed:
3685 free_page((unsigned long)crq->msgs);
3686 return retrc;
3687}
3688
3689/* debugfs for dump */
3690static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3691{
3692 struct net_device *netdev = seq->private;
3693 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3694 struct device *dev = &adapter->vdev->dev;
3695 union ibmvnic_crq crq;
3696
3697 memset(&crq, 0, sizeof(crq));
3698 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3699 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003700
3701 init_completion(&adapter->fw_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05003702 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003703 wait_for_completion(&adapter->fw_done);
3704
3705 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3706
3707 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3708 DMA_BIDIRECTIONAL);
3709
3710 kfree(adapter->dump_data);
3711
3712 return 0;
3713}
3714
3715static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3716{
3717 return single_open(file, ibmvnic_dump_show, inode->i_private);
3718}
3719
3720static const struct file_operations ibmvnic_dump_ops = {
3721 .owner = THIS_MODULE,
3722 .open = ibmvnic_dump_open,
3723 .read = seq_read,
3724 .llseek = seq_lseek,
3725 .release = single_release,
3726};
3727
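/* Deferred work scheduled once the partner sends IBMVNIC_CRQ_INIT: redo
 * the version exchange and capability negotiation, then either re-open
 * the device after a failover or register the net_device on the first
 * passive bring-up.
 */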
Thomas Falcon65dc6892016-07-06 15:35:18 -05003728static void handle_crq_init_rsp(struct work_struct *work)
3729{
3730 struct ibmvnic_adapter *adapter = container_of(work,
3731 struct ibmvnic_adapter,
3732 vnic_crq_init);
3733 struct device *dev = &adapter->vdev->dev;
3734 struct net_device *netdev = adapter->netdev;
3735 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcondfad09a2016-08-18 11:37:51 -05003736 bool restart = false;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003737 int rc;
3738
Thomas Falcondfad09a2016-08-18 11:37:51 -05003739 if (adapter->failover) {
3740 release_sub_crqs(adapter);
3741 if (netif_running(netdev)) {
3742 netif_tx_disable(netdev);
3743 ibmvnic_close(netdev);
3744 restart = true;
3745 }
3746 }
3747
Thomas Falcon65dc6892016-07-06 15:35:18 -05003748 reinit_completion(&adapter->init_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05003749 send_version_xchg(adapter);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003750 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3751 dev_err(dev, "Passive init timeout\n");
3752 goto task_failed;
3753 }
3754
Thomas Falconf39f0d12017-02-14 10:22:59 -06003755 netdev->mtu = adapter->req_mtu - ETH_HLEN;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003756
Thomas Falcondfad09a2016-08-18 11:37:51 -05003757 if (adapter->failover) {
3758 adapter->failover = false;
3759 if (restart) {
3760 rc = ibmvnic_open(netdev);
3761 if (rc)
3762 goto restart_failed;
3763 }
3764 netif_carrier_on(netdev);
3765 return;
3766 }
3767
Thomas Falcon65dc6892016-07-06 15:35:18 -05003768 rc = register_netdev(netdev);
3769 if (rc) {
3770 dev_err(dev,
3771 "failed to register netdev rc=%d\n", rc);
3772 goto register_failed;
3773 }
3774 dev_info(dev, "ibmvnic registered\n");
3775
3776 return;
3777
Thomas Falcondfad09a2016-08-18 11:37:51 -05003778restart_failed:
3779 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003780register_failed:
3781 release_sub_crqs(adapter);
3782task_failed:
3783 dev_err(dev, "Passive initialization was not successful\n");
3784}
3785
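/* One-time adapter bring-up: initialize the main CRQ, map the statistics
 * buffer, create the per-adapter debugfs directory and dump file, then
 * start the CRQ init handshake and wait for it to complete.
 */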
John Allenf6ef6402017-03-17 17:13:42 -05003786static int ibmvnic_init(struct ibmvnic_adapter *adapter)
3787{
3788 struct device *dev = &adapter->vdev->dev;
3789 unsigned long timeout = msecs_to_jiffies(30000);
3790 struct dentry *ent;
3791 char buf[17]; /* debugfs name buf */
3792 int rc;
3793
3794 rc = ibmvnic_init_crq_queue(adapter);
3795 if (rc) {
3796 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3797 return rc;
3798 }
3799
3800 adapter->stats_token = dma_map_single(dev, &adapter->stats,
3801 sizeof(struct ibmvnic_statistics),
3802 DMA_FROM_DEVICE);
3803 if (dma_mapping_error(dev, adapter->stats_token)) {
3804 ibmvnic_release_crq_queue(adapter);
3805 dev_err(dev, "Couldn't map stats buffer\n");
3806 return -ENOMEM;
3807 }
3808
3809 snprintf(buf, sizeof(buf), "ibmvnic_%x", adapter->vdev->unit_address);
3810 ent = debugfs_create_dir(buf, NULL);
3811 if (!ent || IS_ERR(ent)) {
3812 dev_info(dev, "debugfs create directory failed\n");
3813 adapter->debugfs_dir = NULL;
3814 } else {
3815 adapter->debugfs_dir = ent;
3816 ent = debugfs_create_file("dump", S_IRUGO,
3817 adapter->debugfs_dir,
3818 adapter->netdev, &ibmvnic_dump_ops);
3819 if (!ent || IS_ERR(ent)) {
3820 dev_info(dev, "debugfs create dump file failed\n");
3821 adapter->debugfs_dump = NULL;
3822 } else {
3823 adapter->debugfs_dump = ent;
3824 }
3825 }
3826
3827 init_completion(&adapter->init_done);
3828 ibmvnic_send_crq_init(adapter);
3829 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3830 dev_err(dev, "Initialization sequence timed out\n");
3831 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3832 debugfs_remove_recursive(adapter->debugfs_dir);
3833 ibmvnic_release_crq_queue(adapter);
3834 return -1;
3835 }
3836
3837 return 0;
3838}
3839
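/* VIO probe entry point: read the MAC address from the device tree,
 * allocate the net_device, set up adapter state and work items, run the
 * CRQ bring-up via ibmvnic_init() and register the net_device.
 */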
Thomas Falcon032c5e82015-12-21 11:26:06 -06003840static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3841{
3842 struct ibmvnic_adapter *adapter;
3843 struct net_device *netdev;
3844 unsigned char *mac_addr_p;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003845 int rc;
3846
3847 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3848 dev->unit_address);
3849
3850 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3851 VETH_MAC_ADDR, NULL);
3852 if (!mac_addr_p) {
3853 dev_err(&dev->dev,
3854 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3855 __FILE__, __LINE__);
		return -EINVAL;
3857 }
3858
3859 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3860 IBMVNIC_MAX_TX_QUEUES);
3861 if (!netdev)
3862 return -ENOMEM;
3863
3864 adapter = netdev_priv(netdev);
3865 dev_set_drvdata(&dev->dev, netdev);
3866 adapter->vdev = dev;
3867 adapter->netdev = netdev;
Thomas Falcondfad09a2016-08-18 11:37:51 -05003868 adapter->failover = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003869
3870 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3871 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3872 netdev->irq = dev->irq;
3873 netdev->netdev_ops = &ibmvnic_netdev_ops;
3874 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3875 SET_NETDEV_DEV(netdev, &dev->dev);
3876
Thomas Falcon65dc6892016-07-06 15:35:18 -05003877 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003878 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003879
Thomas Falcon032c5e82015-12-21 11:26:06 -06003880 spin_lock_init(&adapter->stats_lock);
3881
Thomas Falcon032c5e82015-12-21 11:26:06 -06003882 INIT_LIST_HEAD(&adapter->errors);
3883 INIT_LIST_HEAD(&adapter->inflight);
3884 spin_lock_init(&adapter->error_list_lock);
3885 spin_lock_init(&adapter->inflight_lock);
3886
John Allenf6ef6402017-03-17 17:13:42 -05003887 rc = ibmvnic_init(adapter);
3888 if (rc) {
3889 free_netdev(netdev);
3890 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003891 }
3892
Thomas Falconf39f0d12017-02-14 10:22:59 -06003893 netdev->mtu = adapter->req_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003894
3895 rc = register_netdev(netdev);
3896 if (rc) {
3897 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
John Allenf6ef6402017-03-17 17:13:42 -05003898 free_netdev(netdev);
3899 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003900 }
3901 dev_info(&dev->dev, "ibmvnic registered\n");
3902
3903 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003904}
3905
3906static int ibmvnic_remove(struct vio_dev *dev)
3907{
3908 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3909 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3910
3911 unregister_netdev(netdev);
3912
3913 release_sub_crqs(adapter);
3914
3915 ibmvnic_release_crq_queue(adapter);
3916
3917 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3918 debugfs_remove_recursive(adapter->debugfs_dir);
3919
Thomas Falconb7f193d2016-11-11 11:00:45 -06003920 dma_unmap_single(&dev->dev, adapter->stats_token,
3921 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3922
Thomas Falcon032c5e82015-12-21 11:26:06 -06003923 if (adapter->ras_comps)
3924 dma_free_coherent(&dev->dev,
3925 adapter->ras_comp_num *
3926 sizeof(struct ibmvnic_fw_component),
3927 adapter->ras_comps, adapter->ras_comps_tok);
3928
3929 kfree(adapter->ras_comp_int);
3930
3931 free_netdev(netdev);
3932 dev_set_drvdata(&dev->dev, NULL);
3933
3934 return 0;
3935}
3936
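/* Report the DMA window space (IO entitlement) this adapter wants: the
 * CRQ page, the bounce buffer, the statistics buffer, the sub-CRQ
 * message queues and the long-term mapped receive pools.
 */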
3937static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3938{
3939 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3940 struct ibmvnic_adapter *adapter;
3941 struct iommu_table *tbl;
3942 unsigned long ret = 0;
3943 int i;
3944
3945 tbl = get_iommu_table_base(&vdev->dev);
3946
	/* netdev inits at probe time along with the structures we need below */
3948 if (!netdev)
3949 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3950
3951 adapter = netdev_priv(netdev);
3952
3953 ret += PAGE_SIZE; /* the crq message queue */
3954 ret += adapter->bounce_buffer_size;
3955 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3956
3957 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3958 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3959
3960 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3961 i++)
3962 ret += adapter->rx_pool[i].size *
3963 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3964
3965 return ret;
3966}
3967
3968static int ibmvnic_resume(struct device *dev)
3969{
3970 struct net_device *netdev = dev_get_drvdata(dev);
3971 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3972 int i;
3973
3974 /* kick the interrupt handlers just in case we lost an interrupt */
3975 for (i = 0; i < adapter->req_rx_queues; i++)
3976 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3977 adapter->rx_scrq[i]);
3978
3979 return 0;
3980}
3981
3982static struct vio_device_id ibmvnic_device_table[] = {
3983 {"network", "IBM,vnic"},
3984 {"", "" }
3985};
3986MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3987
3988static const struct dev_pm_ops ibmvnic_pm_ops = {
3989 .resume = ibmvnic_resume
3990};
3991
3992static struct vio_driver ibmvnic_driver = {
3993 .id_table = ibmvnic_device_table,
3994 .probe = ibmvnic_probe,
3995 .remove = ibmvnic_remove,
3996 .get_desired_dma = ibmvnic_get_desired_dma,
3997 .name = ibmvnic_driver_name,
3998 .pm = &ibmvnic_pm_ops,
3999};
4000
4001/* module functions */
4002static int __init ibmvnic_module_init(void)
4003{
4004 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4005 IBMVNIC_DRIVER_VERSION);
4006
4007 return vio_register_driver(&ibmvnic_driver);
4008}
4009
4010static void __exit ibmvnic_module_exit(void)
4011{
4012 vio_unregister_driver(&ibmvnic_driver);
4013}
4014
4015module_init(ibmvnic_module_init);
4016module_exit(ibmvnic_module_exit);