// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC "vDPA Device Simulator"
#define DRV_LICENSE "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

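/*
 * Per-virtqueue state. The vringh instance gives the simulator host-side
 * access to a guest-visible virtio ring; all ring addresses are resolved
 * through the device iotlb. ->cb and ->private hold the driver's
 * interrupt callback installed via set_vq_cb().
 */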
struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

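/*
 * Features advertised to the driver: VIRTIO_F_VERSION_1 selects the
 * modern (little-endian) interface and VIRTIO_F_ACCESS_PLATFORM tells
 * the driver it must go through the DMA API, so every buffer address
 * passes through the simulator's iotlb.
 */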
static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1) |
			      (1ULL << VIRTIO_F_ACCESS_PLATFORM);

/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
	struct work_struct work;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	struct virtio_net_config config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
};

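/*
 * Config-space byte order: a legacy (pre-VERSION_1) guest uses native
 * endianness, while a VERSION_1 guest is always little-endian, so the
 * helpers below pick the byte order from the negotiated features.
 */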
/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
{
	return virtio_legacy_is_little_endian() ||
		(vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
}

static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
{
	return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
}

static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
{
	return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
}

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

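/*
 * (Re)initialize the host-side vringh from the addresses the driver
 * programmed via set_vq_address(); the descriptor, available and used
 * ring pointers are IOVAs translated through the device iotlb at
 * access time.
 */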
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);
}

static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < VDPASIM_VQ_NUM; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

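/*
 * Datapath: a simple loopback. Each buffer posted on the TX queue (vq 1)
 * is copied through a one-page bounce buffer into the next buffer posted
 * on the RX queue (vq 0). After a few packets (pkts > 4) the work item
 * reschedules itself so it does not monopolize the workqueue.
 */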
static void vdpasim_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int pkts = 0;
	int err;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (txq->cb)
			txq->cb(txq->private);
		if (rxq->cb)
			rxq->cb(rxq->private);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

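/*
 * Translate a DMA direction into iotlb access permissions: the device
 * writes into DMA_FROM_DEVICE buffers (VHOST_MAP_WO) and reads from
 * DMA_TO_DEVICE buffers (VHOST_MAP_RO).
 */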
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

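/*
 * The simulator supplies its own dma_map_ops: "mapping" a page just
 * records an identity (PA == IOVA) range in the device iotlb, which is
 * all the vringh accessors need to translate buffer addresses.
 */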
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use an identity mapping and avoid e.g. an
	 * IOVA allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, pa, perm);
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}

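/*
 * Coherent allocations follow the same identity-mapping scheme: back the
 * allocation with kmalloc() and publish its physical range in the iotlb
 * as read-write.
 */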
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else
			*dma_addr = (dma_addr_t)pa;
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_net_config_ops;
static const struct vdpa_config_ops vdpasim_net_batch_config_ops;

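/*
 * Allocate and register the simulated device: pick the config ops variant
 * from the batch_mapping module parameter, install the simulator's
 * dma_map_ops, and set up the iotlb, the bounce buffer and a random MAC
 * before registering on the vDPA bus.
 */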
static struct vdpasim *vdpasim_create(void)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_net_batch_config_ops;
	else
		ops = &vdpasim_net_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
	if (!vdpasim)
		goto err_alloc;

	INIT_WORK(&vdpasim->work, vdpasim_work);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	eth_random_addr(vdpasim->config.mac);

	vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
	vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

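/*
 * For a split virtqueue the only state that needs to be saved and
 * restored is the next available index, so set/get_vq_state() simply
 * mirror it into and out of the vringh instance.
 */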
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	return vdpasim_features;
}

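/*
 * Feature negotiation: VIRTIO_F_ACCESS_PLATFORM is mandatory because the
 * simulator relies on the driver going through the DMA API (and thus the
 * iotlb) for every buffer address.
 */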
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct virtio_net_config *config = &vdpasim->config;

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim_features;

	/* This is the earliest point at which we know whether the guest
	 * is using the legacy interface, so it is the earliest we can set
	 * the endian-sensitive config fields.
	 * Note: we actually require VIRTIO_F_ACCESS_PLATFORM above, which
	 * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
	 */

	config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
	config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	return VDPASIM_DEVICE_ID;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

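/*
 * Config space reads are served only if they fit entirely inside
 * struct virtio_net_config; out-of-range requests are silently ignored.
 */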
static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len <= sizeof(struct virtio_net_config))
		memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	/* No writable config supported by vdpasim */
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

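/* The simulator places no restriction on IOVAs: report the full
 * 64-bit range.
 */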
static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

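/*
 * Batched mapping path (batch_mapping=1): replace the whole device iotlb
 * with the ranges from the caller-supplied iotlb in one operation; on
 * failure the device iotlb is left empty.
 */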
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

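/*
 * Incremental mapping path (batch_mapping=0): add or remove a single
 * IOVA range in the device iotlb per call.
 */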
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
}

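/*
 * The two ops tables differ only in how mappings reach the device iotlb:
 * vdpasim_net_config_ops exposes incremental dma_map/dma_unmap, while
 * vdpasim_net_batch_config_ops exposes the batched set_map instead.
 */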
static const struct vdpa_config_ops vdpasim_net_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

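/* Module init/exit: a single global simulator instance is created at
 * load time and unregistered at unload.
 */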
static int __init vdpasim_dev_init(void)
{
	vdpasim_dev = vdpasim_create();

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);