/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"

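/*
 * State for the devcmd2 command path: commands are posted through a
 * work queue and completions are read back from a separate results
 * ring, with the color bit distinguishing new results from stale
 * entries after each ring wrap.
 */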
struct devcmd2_controller {
	struct vnic_wq_ctrl *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			int wait);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

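/*
 * Walk the resource table firmware exposes at the start of BAR0:
 * validate the header magic/version, then record the mapped address
 * and entry count for each resource type located in BAR0.
 */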
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD2:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

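/*
 * Worked example of the sizing rules below: a request for 64
 * descriptors of 16 bytes yields desc_count = ALIGN(64, 32) = 64 and
 * desc_size = ALIGN(16, 16) = 16, so size = 1024 bytes and
 * size_unaligned = 1024 + 512, the extra 512 bytes leaving room to
 * round the base address up to the required alignment.
 */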
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count,
	unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

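/*
 * Allocate the ring from coherent DMA memory, over-allocating by
 * base_align bytes so base_addr can be rounded up to the required
 * alignment; ring->descs then points at the aligned offset within the
 * unaligned buffer.
 */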
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned,
		&ring->base_addr_unaligned, GFP_KERNEL);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

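/*
 * devcmd1 handshake with firmware through the BAR0 devcmd registers:
 * fail fast if the previous command is still busy, write the argument
 * registers and the command code, then poll the status register every
 * 100us for up to "wait" iterations, translating firmware error codes
 * back into negative errno values.
 */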
int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;
	u64 *a0 = &vdev->args[0];
	u64 *a1 = &vdev->args[1];

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

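/*
 * fw_info lives in a coherent DMA buffer so firmware can fill it in
 * place; it is fetched once on first use and cached for later callers.
 */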
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

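/*
 * Like fw_info, the stats block is allocated once in coherent DMA
 * memory; each dump passes its bus address and size so firmware can
 * write the current counters directly into it.
 */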
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

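/*
 * CMD_OPEN and CMD_SOFT_RESET complete asynchronously; the *_done
 * helpers below poll the corresponding *_STATUS command, which returns
 * 0 in a0 once the operation has finished.
 */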
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}

void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);
}

void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);
}

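/*
 * Register the notify buffer with firmware: a1 carries the interrupt
 * to raise in bits 32-47 (0xffff means no interrupt, used by unset
 * below to unregister) and the buffer size in the low 32 bits.
 */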
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL;	/* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

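/*
 * Firmware updates the notify buffer asynchronously; word 0 holds the
 * sum of the remaining words, so re-copy the buffer until the checksum
 * matches and callers see a consistent snapshot.
 */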
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
	u64 a0 = new_default_vlan, a1 = 0;
	int wait = 1000;
	int old_vlan = 0;

	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
	return (u16)old_vlan;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

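/*
 * Allocate (or reuse) the vnic_dev handle, discover the BAR0 resources,
 * and map the devcmd register region; on failure the partially
 * initialized handle is torn down via vnic_dev_unregister().
 */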
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}