// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

static char *macaddr;
module_param(macaddr, charp, 0);
MODULE_PARM_DESC(macaddr, "Ethernet MAC address");

static u8 macaddr_buf[ETH_ALEN];

struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

#define VDPASIM_FEATURES	((1ULL << VIRTIO_F_ANY_LAYOUT) | \
				 (1ULL << VIRTIO_F_VERSION_1)  | \
				 (1ULL << VIRTIO_F_ACCESS_PLATFORM))

#define VDPASIM_NET_FEATURES	(VDPASIM_FEATURES | \
				 (1ULL << VIRTIO_NET_F_MAC))

struct vdpasim;

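/*
 * Per device-type attributes: features, config space size, datapath
 * worker and config accessors. Only a virtio-net instance is created
 * today, but the core below only ever goes through this structure.
 */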
struct vdpasim_dev_attr {
	u64 supported_features;
	size_t config_size;
	size_t buffer_size;
	int nvqs;
	u32 id;

	work_func_t work_fn;
	void (*get_config)(struct vdpasim *vdpasim, void *config);
	void (*set_config)(struct vdpasim *vdpasim, const void *config);
};

/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue *vqs;
	struct work_struct work;
	struct vdpasim_dev_attr dev_attr;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	/* virtio config according to device type */
	void *config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
};

/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
{
	return virtio_legacy_is_little_endian() ||
		(vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
}

static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
{
	return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
}

static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
{
	return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
}

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

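/*
 * (Re)attach the vring to the descriptor/available/used addresses the
 * driver programmed via set_vq_address(). The addresses are IOVAs; the
 * vringh iotlb helpers translate them through vdpasim->iommu.
 */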
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

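/*
 * Datapath worker: loop packets from the TX queue (vq 1) back into the
 * RX queue (vq 0) through the bounce buffer. The work is rescheduled
 * after a small batch so one busy guest cannot monopolize the workqueue.
 */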
static void vdpasim_net_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int pkts = 0;
	int err;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (vringh_need_notify_iotlb(&txq->vring) > 0)
			vringh_notify(&txq->vring);
		if (vringh_need_notify_iotlb(&rxq->vring) > 0)
			vringh_notify(&rxq->vring);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

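/*
 * Translate a DMA API direction into vhost IOTLB access bits; DMA_NONE
 * yields -EFAULT, which callers turn into DMA_MAPPING_ERROR.
 */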
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

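/*
 * dma_map_ops glue: the simulator is its own DMA device, so "mapping" a
 * page just records an identity entry for it in the device IOTLB, and
 * unmapping deletes that entry again.
 */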
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use identity mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, perm);
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else
			*dma_addr = (dma_addr_t)pa;
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}

static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

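/*
 * Allocate and register one simulator instance. Once vdpa_alloc_device()
 * has succeeded, every error path goes through put_device(), which drops
 * the initial reference and lets vdpasim_free() release whatever was
 * allocated so far.
 */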
static struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->nvqs);
	if (!vdpasim)
		goto err_alloc;

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	if (macaddr) {
		mac_pton(macaddr, macaddr_buf);
		if (!is_valid_ether_addr(macaddr_buf)) {
			ret = -EADDRNOTAVAIL;
			goto err_iommu;
		}
	} else {
		eth_random_addr(macaddr_buf);
	}

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

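/*
 * The only virtqueue state the simulator exposes is the split-ring
 * available index, mirrored into vringh's last_avail_idx.
 */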
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

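/* Writing a status of 0 is the virtio device reset request. */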
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

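/*
 * Batched mapping: replace the whole device IOTLB with the table passed
 * in by the bus driver instead of updating it one entry at a time.
 */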
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kvfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

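/*
 * The two ops tables below differ only in how mappings reach the device
 * IOTLB: incremental dma_map()/dma_unmap() here versus a single
 * set_map() in the batched variant.
 */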
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

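/* Fill the virtio-net config space: fixed MTU, link up, and the MAC. */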
static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_net_config *net_config =
		(struct virtio_net_config *)config;

	net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
	net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
	memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
}

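/*
 * Module init: describe the virtio-net simulator and create the single
 * global instance. A hypothetical invocation (module name assumed):
 *   modprobe vdpa_sim macaddr=02:00:00:00:00:01 batch_mapping=0
 */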
static int __init vdpasim_dev_init(void)
{
	struct vdpasim_dev_attr dev_attr = {};

	dev_attr.id = VIRTIO_ID_NET;
	dev_attr.supported_features = VDPASIM_NET_FEATURES;
	dev_attr.nvqs = VDPASIM_VQ_NUM;
	dev_attr.config_size = sizeof(struct virtio_net_config);
	dev_attr.get_config = vdpasim_net_get_config;
	dev_attr.work_fn = vdpasim_net_work;
	dev_attr.buffer_size = PAGE_SIZE;

	vdpasim_dev = vdpasim_create(&dev_attr);

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);