// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

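/*
 * (Re)initialize the vringh instance of a virtqueue with the descriptor,
 * driver (avail) and device (used) addresses programmed by the driver, and
 * hook up the notify callback used to fire the used-buffer interrupt.
 */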
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

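/*
 * Allocate an IOVA range covering @size bytes and record the IOVA -> physical
 * translation in the simulator's vhost_iotlb with the given access
 * permissions. Used by the simulator's dma_map_ops below.
 */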
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
			  ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}

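/*
 * Custom DMA ops installed on the simulated vDPA device: DMA mappings
 * requested against the device's dma_dev are translated into entries of the
 * simulator's vhost_iotlb instead of going through a real IOMMU.
 */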
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

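/*
 * Allocate and initialize a simulator instance from the attributes supplied
 * by a device-specific simulator (for example the net or block simulator):
 * config space, virtqueues, iotlb, data buffer and the byte-granularity IOVA
 * domain backing the DMA ops above.
 */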
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->name, false);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

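/* vdpa_config_ops callbacks, invoked through the vDPA core by the bus drivers bound to the device */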
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

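/*
 * Batched mapping path (.set_map): replace the whole simulator iotlb with
 * the translations described by the incoming iotlb in a single call.
 */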
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

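/*
 * Incremental mapping path (.dma_map/.dma_unmap), used when batch_mapping
 * is disabled: add or remove a single range at a time.
 */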
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
					pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	if (vdpa_get_dma_dev(vdpa)) {
		put_iova_domain(&vdpasim->iova);
		iova_cache_put();
	}

	kvfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

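/*
 * Two flavours of vdpa_config_ops are provided: the default one exposes the
 * incremental .dma_map/.dma_unmap interface, while the batch variant exposes
 * .set_map. Which one is registered depends on the batch_mapping module
 * parameter (see vdpasim_create()).
 */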
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);