// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR		"Intel Corporation"
#define IFCVF_DRIVER_NAME	"ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < queues; i++) {
		devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
		vf->vring[i].irq = -EINVAL;
	}

	devm_free_irq(&pdev->dev, vf->config_irq, vf);
	ifcvf_free_irq_vectors(pdev);
}

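/*
 * Interrupt layout: one MSI-X vector per virtqueue plus one vector for
 * device config change events. Vector 0 carries the config interrupt;
 * queue i uses vector i + IFCVF_MSI_QUEUE_OFF.
 */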
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int vector, i, ret, irq;
	u16 max_intr;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;

	ret = pci_alloc_irq_vectors(pdev, max_intr,
				    max_intr, PCI_IRQ_MSIX);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
		 pci_name(pdev));
	vector = 0;
	vf->config_irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		return ret;
	}

	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
			 pci_name(pdev), i);
		vector = i + IFCVF_MSI_QUEUE_OFF;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev,
				  "Failed to request irq for vq %d\n", i);
			ifcvf_free_irq(adapter, i);

			return ret;
		}

		vf->vring[i].irq = irq;
	}

	return 0;
}

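/*
 * ifcvf_start_datapath()/ifcvf_stop_datapath() bring the hardware rings
 * up and down; on a failed start the device status is latched to
 * VIRTIO_CONFIG_S_FAILED so the driver side can observe the error.
 */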
static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK) {
		features = ifcvf_get_features(vf);
	} else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

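/*
 * Driving the status from !DRIVER_OK to DRIVER_OK is what requests the
 * irqs and starts the datapath; a failed irq setup marks the device
 * VIRTIO_CONFIG_S_FAILED instead of going ready.
 */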
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == 0)
		return 0;

	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter, vf->nr_vring);
	}

	ifcvf_reset_vring(adapter);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

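/*
 * Only split virtqueues are supported, so a virtqueue's state is fully
 * captured by its next avail index.
 */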
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	size_t size;

	switch (vf->dev_type) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	case VIRTIO_ID_BLOCK:
		size = sizeof(struct virtio_blk_config);
		break;
	default:
		size = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return size;
}

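/*
 * Device config space accessors. Note the bounds check is against the
 * virtio-net config layout regardless of the device type.
 */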
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].irq;
}

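/*
 * Expose a vq's doorbell for direct mapping by userspace. A zero
 * notify_off_multiplier means all queues share one notify register, so
 * a full page is advertised; otherwise each queue owns
 * notify_off_multiplier bytes.
 */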
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_features = ifcvf_vdpa_get_features,
	.set_features = ifcvf_vdpa_set_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/*
	 * This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
	 * devices and transitional devices in legacy mode will not work
	 * for vDPA; this driver will not drive devices with a legacy
	 * interface.
	 *
	 * Per the virtio spec, PCI device IDs below 0x1040 are
	 * transitional and carry the virtio device type in the subsystem
	 * device ID; modern devices use 0x1040 + virtio device type.
	 */

	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}

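/*
 * dev_add carves the single vDPA device out of the VF. Each VF backs
 * exactly one vDPA device, so a second add on the same management
 * device fails with -EOPNOTSUPP. From userspace this is driven through
 * the vDPA management API, e.g. with the iproute2 "vdpa" tool (the
 * device address below is illustrative only):
 *
 *   vdpa mgmtdev show
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:06:00.2
 */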
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	struct device *dev;
	int ret, i;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	if (ifcvf_mgmt_dev->adapter)
		return -EOPNOTSUPP;

	pdev = ifcvf_mgmt_dev->pdev;
	dev = &pdev->dev;
	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, name, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	ifcvf_mgmt_dev->adapter = adapter;
	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	vf = &adapter->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);

	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		goto err;
	}

	return 0;

err:
	put_device(&adapter->vdpa.dev);
	return ret;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};

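/*
 * probe only sets up the PCI plumbing: enable the device, map BARs 0,
 * 2 and 4, require 64-bit DMA and register the vDPA management device.
 * The vDPA device itself is created later through dev_add.
 */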
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct device *dev = &pdev->dev;
	u32 dev_type;
	int ret;

	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->pdev = pdev;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		goto err;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		goto err;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		goto err;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to add devres to free irq vectors\n");
		goto err;
	}

	pci_set_master(pdev);

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	return 0;

err:
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");