blob: 9e90e969af5550254a8d2b68e58b1fc6c3033427 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */
7
8#include <linux/compat.h>
9#include <linux/eventfd.h>
10#include <linux/vhost.h>
11#include <linux/miscdevice.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/workqueue.h>
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020015#include <linux/file.h>
16#include <linux/slab.h>
17
18#include "test.h"
Asias He6ac1afb2013-05-06 16:38:21 +080019#include "vhost.h"
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020020
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Virtqueue indices: this test device exposes exactly one vq. */
enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};
29
30struct vhost_test {
31 struct vhost_dev dev;
32 struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
33};
34
35/* Expects to be always run from workqueue - which acts as
36 * read-size critical section for our kind of RCU. */
37static void handle_vq(struct vhost_test *n)
38{
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +030039 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020040 unsigned out, in;
41 int head;
42 size_t len, total_len = 0;
43 void *private;
44
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020045 mutex_lock(&vq->mutex);
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +030046 private = vq->private_data;
47 if (!private) {
48 mutex_unlock(&vq->mutex);
49 return;
50 }
51
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +030052 vhost_disable_notify(&n->dev, vq);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020053
54 for (;;) {
Michael S. Tsirkin47283be2014-06-05 15:20:27 +030055 head = vhost_get_vq_desc(vq, vq->iov,
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020056 ARRAY_SIZE(vq->iov),
57 &out, &in,
58 NULL, NULL);
59 /* On error, stop handling until the next kick. */
60 if (unlikely(head < 0))
61 break;
62 /* Nothing new? Wait for eventfd to tell us they refilled. */
63 if (head == vq->num) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +030064 if (unlikely(vhost_enable_notify(&n->dev, vq))) {
65 vhost_disable_notify(&n->dev, vq);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +020066 continue;
67 }
68 break;
69 }
70 if (in) {
71 vq_err(vq, "Unexpected descriptor format for TX: "
72 "out %d, int %d\n", out, in);
73 break;
74 }
75 len = iov_length(vq->iov, out);
76 /* Sanity check */
77 if (!len) {
78 vq_err(vq, "Unexpected 0 len for TX\n");
79 break;
80 }
81 vhost_add_used_and_signal(&n->dev, vq, head, 0);
82 total_len += len;
83 if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
84 vhost_poll_queue(&vq->poll);
85 break;
86 }
87 }
88
89 mutex_unlock(&vq->mutex);
90}
91
92static void handle_vq_kick(struct vhost_work *work)
93{
94 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
95 poll.work);
96 struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);
97
98 handle_vq(n);
99}
100
101static int vhost_test_open(struct inode *inode, struct file *f)
102{
103 struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
104 struct vhost_dev *dev;
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300105 struct vhost_virtqueue **vqs;
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200106
107 if (!n)
108 return -ENOMEM;
Kees Cook6da2ec52018-06-12 13:55:00 -0700109 vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300110 if (!vqs) {
111 kfree(n);
112 return -ENOMEM;
113 }
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200114
115 dev = &n->dev;
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300116 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200117 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
Zhi Yong Wu59566b6e2013-12-07 04:13:03 +0800118 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200119
120 f->private_data = n;
121
122 return 0;
123}
124
125static void *vhost_test_stop_vq(struct vhost_test *n,
126 struct vhost_virtqueue *vq)
127{
128 void *private;
129
130 mutex_lock(&vq->mutex);
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300131 private = vq->private_data;
132 vq->private_data = NULL;
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200133 mutex_unlock(&vq->mutex);
134 return private;
135}
136
137static void vhost_test_stop(struct vhost_test *n, void **privatep)
138{
139 *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
140}
141
142static void vhost_test_flush_vq(struct vhost_test *n, int index)
143{
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300144 vhost_poll_flush(&n->vqs[index].poll);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200145}
146
147static void vhost_test_flush(struct vhost_test *n)
148{
149 vhost_test_flush_vq(n, VHOST_TEST_VQ);
150}
151
152static int vhost_test_release(struct inode *inode, struct file *f)
153{
154 struct vhost_test *n = f->private_data;
155 void *private;
156
157 vhost_test_stop(n, &private);
158 vhost_test_flush(n);
夷则(Caspar)f6f93f72017-12-25 00:08:58 +0800159 vhost_dev_cleanup(&n->dev);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200160 /* We do an extra flush before freeing memory,
161 * since jobs can re-queue themselves. */
162 vhost_test_flush(n);
163 kfree(n);
164 return 0;
165}
166
167static long vhost_test_run(struct vhost_test *n, int test)
168{
169 void *priv, *oldpriv;
170 struct vhost_virtqueue *vq;
171 int r, index;
172
173 if (test < 0 || test > 1)
174 return -EINVAL;
175
176 mutex_lock(&n->dev.mutex);
177 r = vhost_dev_check_owner(&n->dev);
178 if (r)
179 goto err;
180
181 for (index = 0; index < n->dev.nvqs; ++index) {
182 /* Verify that ring has been setup correctly. */
183 if (!vhost_vq_access_ok(&n->vqs[index])) {
184 r = -EFAULT;
185 goto err;
186 }
187 }
188
189 for (index = 0; index < n->dev.nvqs; ++index) {
190 vq = n->vqs + index;
191 mutex_lock(&vq->mutex);
192 priv = test ? n : NULL;
193
194 /* start polling new socket */
Asias He22fa90c2013-05-07 14:54:36 +0800195 oldpriv = vq->private_data;
196 vq->private_data = priv;
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200197
Greg Kurz80f7d032016-02-16 15:59:44 +0100198 r = vhost_vq_init_access(&n->vqs[index]);
Jason Wangf59281d2011-06-21 18:04:27 +0800199
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200200 mutex_unlock(&vq->mutex);
201
Jason Wangf59281d2011-06-21 18:04:27 +0800202 if (r)
203 goto err;
204
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200205 if (oldpriv) {
206 vhost_test_flush_vq(n, index);
207 }
208 }
209
210 mutex_unlock(&n->dev.mutex);
211 return 0;
212
213err:
214 mutex_unlock(&n->dev.mutex);
215 return r;
216}
217
218static long vhost_test_reset_owner(struct vhost_test *n)
219{
220 void *priv = NULL;
221 long err;
Michael S. Tsirkin446374d2016-08-15 04:28:12 +0300222 struct vhost_umem *umem;
Michael S. Tsirkin150b9e52013-04-28 17:12:08 +0300223
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200224 mutex_lock(&n->dev.mutex);
225 err = vhost_dev_check_owner(&n->dev);
226 if (err)
227 goto done;
Michael S. Tsirkin446374d2016-08-15 04:28:12 +0300228 umem = vhost_dev_reset_owner_prepare();
229 if (!umem) {
Michael S. Tsirkin150b9e52013-04-28 17:12:08 +0300230 err = -ENOMEM;
231 goto done;
232 }
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200233 vhost_test_stop(n, &priv);
234 vhost_test_flush(n);
Michael S. Tsirkin446374d2016-08-15 04:28:12 +0300235 vhost_dev_reset_owner(&n->dev, umem);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200236done:
237 mutex_unlock(&n->dev.mutex);
238 return err;
239}
240
241static int vhost_test_set_features(struct vhost_test *n, u64 features)
242{
Michael S. Tsirkinea16c512014-06-05 15:20:23 +0300243 struct vhost_virtqueue *vq;
244
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200245 mutex_lock(&n->dev.mutex);
246 if ((features & (1 << VHOST_F_LOG_ALL)) &&
247 !vhost_log_access_ok(&n->dev)) {
248 mutex_unlock(&n->dev.mutex);
249 return -EFAULT;
250 }
Michael S. Tsirkinea16c512014-06-05 15:20:23 +0300251 vq = &n->vqs[VHOST_TEST_VQ];
252 mutex_lock(&vq->mutex);
253 vq->acked_features = features;
254 mutex_unlock(&vq->mutex);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200255 mutex_unlock(&n->dev.mutex);
256 return 0;
257}
258
259static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
260 unsigned long arg)
261{
262 struct vhost_test *n = f->private_data;
263 void __user *argp = (void __user *)arg;
264 u64 __user *featurep = argp;
265 int test;
266 u64 features;
267 int r;
268 switch (ioctl) {
269 case VHOST_TEST_RUN:
270 if (copy_from_user(&test, argp, sizeof test))
271 return -EFAULT;
272 return vhost_test_run(n, test);
273 case VHOST_GET_FEATURES:
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300274 features = VHOST_FEATURES;
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200275 if (copy_to_user(featurep, &features, sizeof features))
276 return -EFAULT;
277 return 0;
278 case VHOST_SET_FEATURES:
Michael S. Tsirkin4e9fa502015-09-09 22:24:56 +0300279 printk(KERN_ERR "1\n");
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200280 if (copy_from_user(&features, featurep, sizeof features))
281 return -EFAULT;
Michael S. Tsirkin4e9fa502015-09-09 22:24:56 +0300282 printk(KERN_ERR "2\n");
Michael S. Tsirkin09a34c82013-07-07 17:12:36 +0300283 if (features & ~VHOST_FEATURES)
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200284 return -EOPNOTSUPP;
Michael S. Tsirkin4e9fa502015-09-09 22:24:56 +0300285 printk(KERN_ERR "3\n");
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200286 return vhost_test_set_features(n, features);
287 case VHOST_RESET_OWNER:
288 return vhost_test_reset_owner(n);
289 default:
290 mutex_lock(&n->dev.mutex);
Michael S. Tsirkin73640c92013-03-18 13:22:18 +1030291 r = vhost_dev_ioctl(&n->dev, ioctl, argp);
292 if (r == -ENOIOCTLCMD)
293 r = vhost_vring_ioctl(&n->dev, ioctl, argp);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200294 vhost_test_flush(n);
295 mutex_unlock(&n->dev.mutex);
296 return r;
297 }
298}
299
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: widen the user pointer and reuse the
 * native ioctl handler. */
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
307
308static const struct file_operations vhost_test_fops = {
309 .owner = THIS_MODULE,
310 .release = vhost_test_release,
311 .unlocked_ioctl = vhost_test_ioctl,
312#ifdef CONFIG_COMPAT
313 .compat_ioctl = vhost_test_compat_ioctl,
314#endif
315 .open = vhost_test_open,
316 .llseek = noop_llseek,
317};
318
319static struct miscdevice vhost_test_misc = {
320 MISC_DYNAMIC_MINOR,
321 "vhost-test",
322 &vhost_test_fops,
323};
PrasannaKumar Muralidharanca75d602016-08-25 22:30:49 +0530324module_misc_device(vhost_test_misc);
Michael S. Tsirkin71ccc212010-11-29 19:09:01 +0200325
326MODULE_VERSION("0.0.1");
327MODULE_LICENSE("GPL v2");
328MODULE_AUTHOR("Michael S. Tsirkin");
329MODULE_DESCRIPTION("Host kernel side for virtio simulator");