/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, static inline, vhost_umem_interval_tree);

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		__vhost_vq_meta_reset(d->vqs[i]);
		mutex_unlock(&d->vqs[i]->mutex);
	}
}

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	__vhost_vq_meta_reset(vq);
}

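/* The per-device worker thread: runs queued vhost_work items in the
 * owner's mm context until kthread_stop() is called.
 */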
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}

static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}

static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
				int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return false;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (vhost_overflow(node->userspace_addr, node->size))
			return false;

		if (!access_ok(VERIFY_WRITE, (void __user *)a,
			       node->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_umem_node *node = vq->meta_iotlb[type];

	if (!node)
		return NULL;

	return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

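/* Copy into a guest-visible ring area. When an IOTLB is in use, the
 * destination is translated (via the cached meta entry or translate_desc)
 * before the copy.
 */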
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * could be access through iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that vq
		 * could be access through iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that vq
 * could be access through iotlb. So -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

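/* Put/get helpers for ring fields: use direct userspace access when no
 * IOTLB is configured, otherwise translate through __vhost_get_user().
 */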
#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

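/* Insert a new mapping node; when the table is full, the oldest entry is
 * evicted to make room.
 */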
static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
{
	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return -ENOMEM;

	if (umem->numem == max_iotlb_entries) {
		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
		vhost_umem_free(umem, tmp);
	}

	node->start = start;
	node->size = size;
	node->last = end;
	node->userspace_addr = userspace_addr;
	node->perm = perm;
	INIT_LIST_HEAD(&node->link);
	list_add_tail(&node->link, &umem->umem_list);
	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
	umem->numem++;

	return 0;
}

static void vhost_del_umem_range(struct vhost_umem *umem,
				 u64 start, u64 end)
{
	struct vhost_umem_node *node;

	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   start, end)))
		vhost_umem_free(umem, node);
}

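/* Re-queue virtqueues whose pending IOTLB miss is satisfied by this update. */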
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			mutex_lock(&node->vq->mutex);
			vhost_poll_queue(&node->vq->poll);
			mutex_unlock(&node->vq->mutex);

			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok(VERIFY_READ, (void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	mutex_lock(&dev->mutex);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
					 msg->iova + msg->size - 1,
					 msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_del_umem_range(dev->iotlb, msg->iova,
				     msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type))
		goto done;

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There maybe a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg))
		goto done;
	if (vhost_process_iotlb_msg(dev, &msg)) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 struct vring_desc __user *desc,
			 struct vring_avail __user *avail,
			 struct vring_used __user *used)

{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_umem_node *node,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(node->perm & access))
		vq->meta_iotlb[type] = node;
}

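/* Check that [addr, addr + len) is fully covered by IOTLB entries with the
 * required permission; on a hole, report an IOTLB miss to userspace.
 */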
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr,
							   last);
		if (node == NULL || node->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(node->perm & access)) {
			/* Report the possible access violation by
			 * request another translation from userspace.
			 */
			return false;
		}

		size = node->size - addr + node->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, node, type);

		s += size;
		addr += size;
	}

	return true;
}

int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
			       sizeof *vq->avail +
			       num * sizeof(*vq->avail->ring) + s,
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
			       sizeof *vq->used +
			       num * sizeof(*vq->used->ring) + s,
			       VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	/* Access validation occurs at prefetch time with IOTLB */
	if (vq->iotlb)
		return true;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static struct vhost_umem *vhost_umem_alloc(void)
{
	struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);

	if (!umem)
		return NULL;

	umem->umem_tree = RB_ROOT_CACHED;
	umem->numem = 0;
	INIT_LIST_HEAD(&umem->umem_list);

	return umem;
}

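/* Build a new umem from the userspace memory table and switch every
 * virtqueue over to it under its own mutex.
 */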
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001304static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1305{
Jason Wanga9709d62016-06-23 02:04:31 -04001306 struct vhost_memory mem, *newmem;
1307 struct vhost_memory_region *region;
Jason Wanga9709d62016-06-23 02:04:31 -04001308 struct vhost_umem *newumem, *oldumem;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001309 unsigned long size = offsetof(struct vhost_memory, regions);
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001310 int i;
Krishna Kumard47effe2011-03-01 17:06:37 +05301311
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001312 if (copy_from_user(&mem, m, size))
1313 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001314 if (mem.padding)
1315 return -EOPNOTSUPP;
Igor Mammedovc9ce42f2015-07-02 15:08:11 +02001316 if (mem.nregions > max_mem_regions)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001317 return -E2BIG;
Matthew Wilcoxb2303d72018-06-07 07:57:18 -07001318 newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1319 GFP_KERNEL);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001320 if (!newmem)
1321 return -ENOMEM;
1322
1323 memcpy(newmem, &mem, size);
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001324 if (copy_from_user(newmem->regions, m->regions,
1325 mem.nregions * sizeof *m->regions)) {
Igor Mammedovbcfeaca2015-06-16 18:33:35 +02001326 kvfree(newmem);
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001327 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001328 }
1329
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001330 newumem = vhost_umem_alloc();
Jason Wanga9709d62016-06-23 02:04:31 -04001331 if (!newumem) {
Igor Mammedov4de72552015-07-01 11:07:09 +02001332 kvfree(newmem);
Jason Wanga9709d62016-06-23 02:04:31 -04001333 return -ENOMEM;
Takuya Yoshikawaa02c3782010-05-27 19:03:56 +09001334 }
Jason Wanga9709d62016-06-23 02:04:31 -04001335
Jason Wanga9709d62016-06-23 02:04:31 -04001336 for (region = newmem->regions;
1337 region < newmem->regions + mem.nregions;
1338 region++) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001339 if (vhost_new_umem_range(newumem,
1340 region->guest_phys_addr,
1341 region->memory_size,
1342 region->guest_phys_addr +
1343 region->memory_size - 1,
1344 region->userspace_addr,
1345 VHOST_ACCESS_RW))
Jason Wanga9709d62016-06-23 02:04:31 -04001346 goto err;
Jason Wanga9709d62016-06-23 02:04:31 -04001347 }
1348
1349 if (!memory_access_ok(d, newumem, 0))
1350 goto err;
1351
1352 oldumem = d->umem;
1353 d->umem = newumem;
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001354
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001355 /* All memory accesses are done under some VQ mutex. */
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001356 for (i = 0; i < d->nvqs; ++i) {
1357 mutex_lock(&d->vqs[i]->mutex);
Jason Wanga9709d62016-06-23 02:04:31 -04001358 d->vqs[i]->umem = newumem;
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001359 mutex_unlock(&d->vqs[i]->mutex);
1360 }
Jason Wanga9709d62016-06-23 02:04:31 -04001361
1362 kvfree(newmem);
1363 vhost_umem_clean(oldumem);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001364 return 0;
Jason Wanga9709d62016-06-23 02:04:31 -04001365
1366err:
1367 vhost_umem_clean(newumem);
1368 kvfree(newmem);
1369 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001370}
1371
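/*
 * Editor's sketch (not part of this file): how a userspace VMM might hand
 * one guest memory region to vhost_set_memory() above through the
 * VHOST_SET_MEM_TABLE ioctl. The vhost fd, addresses and size are assumed
 * values; error handling is trimmed for brevity.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_one_region(int vhost_fd, void *hva,
			  unsigned long long gpa, unsigned long long size)
{
	char buf[sizeof(struct vhost_memory) +
		 sizeof(struct vhost_memory_region)];
	struct vhost_memory *mem = (struct vhost_memory *)buf;

	memset(buf, 0, sizeof(buf));		/* mem->padding must be 0 */
	mem->nregions = 1;
	mem->regions[0].guest_phys_addr = gpa;	/* where the guest sees it */
	mem->regions[0].memory_size = size;
	mem->regions[0].userspace_addr = (uintptr_t)hva;
	return ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
}
#endif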
Sonny Rao26b36602018-03-14 10:05:06 -07001372long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001373{
Al Virocecb46f2012-08-27 14:21:39 -04001374 struct file *eventfp, *filep = NULL;
1375 bool pollstart = false, pollstop = false;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001376 struct eventfd_ctx *ctx = NULL;
1377 u32 __user *idxp = argp;
1378 struct vhost_virtqueue *vq;
1379 struct vhost_vring_state s;
1380 struct vhost_vring_file f;
1381 struct vhost_vring_addr a;
1382 u32 idx;
1383 long r;
1384
1385 r = get_user(idx, idxp);
1386 if (r < 0)
1387 return r;
Krishna Kumar0f3d9a12010-05-25 11:10:36 +05301388 if (idx >= d->nvqs)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001389 return -ENOBUFS;
1390
Jason Wangff002262018-10-30 14:10:49 +08001391 idx = array_index_nospec(idx, d->nvqs);
Asias He3ab2e422013-04-27 11:16:48 +08001392 vq = d->vqs[idx];
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001393
1394 mutex_lock(&vq->mutex);
1395
1396 switch (ioctl) {
1397 case VHOST_SET_VRING_NUM:
1398 /* Resizing ring with an active backend?
1399 * You don't want to do that. */
1400 if (vq->private_data) {
1401 r = -EBUSY;
1402 break;
1403 }
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001404 if (copy_from_user(&s, argp, sizeof s)) {
1405 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001406 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001407 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001408 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
1409 r = -EINVAL;
1410 break;
1411 }
1412 vq->num = s.num;
1413 break;
1414 case VHOST_SET_VRING_BASE:
1415 /* Moving base with an active backend?
1416 * You don't want to do that. */
1417 if (vq->private_data) {
1418 r = -EBUSY;
1419 break;
1420 }
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001421 if (copy_from_user(&s, argp, sizeof s)) {
1422 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001423 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001424 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001425 if (s.num > 0xffff) {
1426 r = -EINVAL;
1427 break;
1428 }
Jason Wang8d658432017-07-27 11:22:05 +08001429 vq->last_avail_idx = s.num;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001430 /* Forget the cached index value. */
1431 vq->avail_idx = vq->last_avail_idx;
1432 break;
1433 case VHOST_GET_VRING_BASE:
1434 s.index = idx;
1435 s.num = vq->last_avail_idx;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001436 if (copy_to_user(argp, &s, sizeof s))
1437 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001438 break;
1439 case VHOST_SET_VRING_ADDR:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001440 if (copy_from_user(&a, argp, sizeof a)) {
1441 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001442 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001443 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001444 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
1445 r = -EOPNOTSUPP;
1446 break;
1447 }
1448 /* For 32bit, verify that the top 32bits of the user
1449 data are set to zero. */
1450 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1451 (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1452 (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
1453 r = -EFAULT;
1454 break;
1455 }
Michael S. Tsirkin5d9a07b2014-12-21 01:00:23 +02001456
1457 /* Make sure it's safe to cast pointers to vring types. */
1458 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1459 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1460 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1461 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
Michael S. Tsirkind5424832015-11-16 16:57:08 +02001462 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001463 r = -EINVAL;
1464 break;
1465 }
1466
1467 /* We only verify access here if backend is configured.
 1468	 * If it is not, we don't, as the size might not have been set up.
1469 * We will verify when backend is configured. */
1470 if (vq->private_data) {
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001471 if (!vq_access_ok(vq, vq->num,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001472 (void __user *)(unsigned long)a.desc_user_addr,
1473 (void __user *)(unsigned long)a.avail_user_addr,
1474 (void __user *)(unsigned long)a.used_user_addr)) {
1475 r = -EINVAL;
1476 break;
1477 }
1478
1479 /* Also validate log access for used ring if enabled. */
1480 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1481 !log_access_ok(vq->log_base, a.log_guest_addr,
1482 sizeof *vq->used +
1483 vq->num * sizeof *vq->used->ring)) {
1484 r = -EINVAL;
1485 break;
1486 }
1487 }
1488
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001489 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1490 vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1491 vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1492 vq->log_addr = a.log_guest_addr;
1493 vq->used = (void __user *)(unsigned long)a.used_user_addr;
1494 break;
1495 case VHOST_SET_VRING_KICK:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001496 if (copy_from_user(&f, argp, sizeof f)) {
1497 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001498 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001499 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001500 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001501 if (IS_ERR(eventfp)) {
1502 r = PTR_ERR(eventfp);
1503 break;
1504 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001505 if (eventfp != vq->kick) {
Al Virocecb46f2012-08-27 14:21:39 -04001506 pollstop = (filep = vq->kick) != NULL;
1507 pollstart = (vq->kick = eventfp) != NULL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001508 } else
1509 filep = eventfp;
1510 break;
1511 case VHOST_SET_VRING_CALL:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001512 if (copy_from_user(&f, argp, sizeof f)) {
1513 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001514 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001515 }
Eric Biggerse050c7d2018-01-06 14:52:19 -08001516 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1517 if (IS_ERR(ctx)) {
1518 r = PTR_ERR(ctx);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001519 break;
1520 }
Eric Biggerse050c7d2018-01-06 14:52:19 -08001521 swap(ctx, vq->call_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001522 break;
1523 case VHOST_SET_VRING_ERR:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001524 if (copy_from_user(&f, argp, sizeof f)) {
1525 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001526 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001527 }
Eric Biggers09f332a2018-01-06 14:52:20 -08001528 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1529 if (IS_ERR(ctx)) {
1530 r = PTR_ERR(ctx);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001531 break;
1532 }
Eric Biggers09f332a2018-01-06 14:52:20 -08001533 swap(ctx, vq->error_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001534 break;
Greg Kurz2751c982015-04-24 14:27:24 +02001535 case VHOST_SET_VRING_ENDIAN:
1536 r = vhost_set_vring_endian(vq, argp);
1537 break;
1538 case VHOST_GET_VRING_ENDIAN:
1539 r = vhost_get_vring_endian(vq, idx, argp);
1540 break;
Jason Wang03088132016-03-04 06:24:53 -05001541 case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1542 if (copy_from_user(&s, argp, sizeof(s))) {
1543 r = -EFAULT;
1544 break;
1545 }
1546 vq->busyloop_timeout = s.num;
1547 break;
1548 case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1549 s.index = idx;
1550 s.num = vq->busyloop_timeout;
1551 if (copy_to_user(argp, &s, sizeof(s)))
1552 r = -EFAULT;
1553 break;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001554 default:
1555 r = -ENOIOCTLCMD;
1556 }
1557
1558 if (pollstop && vq->handle_kick)
1559 vhost_poll_stop(&vq->poll);
1560
Eric Biggerse050c7d2018-01-06 14:52:19 -08001561 if (!IS_ERR_OR_NULL(ctx))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001562 eventfd_ctx_put(ctx);
1563 if (filep)
1564 fput(filep);
1565
1566 if (pollstart && vq->handle_kick)
Jason Wang2b8b3282013-01-28 01:05:18 +00001567 r = vhost_poll_start(&vq->poll, vq->kick);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001568
1569 mutex_unlock(&vq->mutex);
1570
1571 if (pollstop && vq->handle_kick)
1572 vhost_poll_flush(&vq->poll);
1573 return r;
1574}
Asias He6ac1afb2013-05-06 16:38:21 +08001575EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001576
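/*
 * Editor's sketch (not part of this file): the userspace side of the
 * vhost_vring_ioctl() switch above, configuring a single virtqueue. The
 * ring layout in *addr and the two eventfds are assumed to have been set
 * up by the caller; error checking is minimal.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int setup_vring(int vhost_fd, unsigned idx, unsigned num,
		       struct vhost_vring_addr *addr,
		       int kick_fd, int call_fd)
{
	struct vhost_vring_state state = { .index = idx, .num = num };
	struct vhost_vring_state base  = { .index = idx, .num = 0 };
	struct vhost_vring_file kick   = { .index = idx, .fd = kick_fd };
	struct vhost_vring_file call   = { .index = idx, .fd = call_fd };

	if (ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state))	/* ring size */
		return -1;
	if (ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base))	/* start at 0 */
		return -1;
	addr->index = idx;
	if (ioctl(vhost_fd, VHOST_SET_VRING_ADDR, addr))	/* ring layout */
		return -1;
	if (ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick))	/* guest->host */
		return -1;
	return ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);	/* host->guest */
}
#endif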
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001577int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1578{
1579 struct vhost_umem *niotlb, *oiotlb;
1580 int i;
1581
1582 niotlb = vhost_umem_alloc();
1583 if (!niotlb)
1584 return -ENOMEM;
1585
1586 oiotlb = d->iotlb;
1587 d->iotlb = niotlb;
1588
1589 for (i = 0; i < d->nvqs; ++i) {
Jason Wangb13f9c62018-08-08 11:43:04 +08001590 struct vhost_virtqueue *vq = d->vqs[i];
1591
1592 mutex_lock(&vq->mutex);
1593 vq->iotlb = niotlb;
1594 __vhost_vq_meta_reset(vq);
1595 mutex_unlock(&vq->mutex);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001596 }
1597
1598 vhost_umem_clean(oiotlb);
1599
1600 return 0;
1601}
1602EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1603
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001604/* Caller must have device mutex */
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001605long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001606{
Eric Biggersd25cc432018-01-06 14:52:21 -08001607 struct eventfd_ctx *ctx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001608 u64 p;
1609 long r;
1610 int i, fd;
1611
1612 /* If you are not the owner, you can become one */
1613 if (ioctl == VHOST_SET_OWNER) {
1614 r = vhost_dev_set_owner(d);
1615 goto done;
1616 }
1617
1618 /* You must be the owner to do anything else */
1619 r = vhost_dev_check_owner(d);
1620 if (r)
1621 goto done;
1622
1623 switch (ioctl) {
1624 case VHOST_SET_MEM_TABLE:
1625 r = vhost_set_memory(d, argp);
1626 break;
1627 case VHOST_SET_LOG_BASE:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001628 if (copy_from_user(&p, argp, sizeof p)) {
1629 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001630 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001631 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001632 if ((u64)(unsigned long)p != p) {
1633 r = -EFAULT;
1634 break;
1635 }
1636 for (i = 0; i < d->nvqs; ++i) {
1637 struct vhost_virtqueue *vq;
1638 void __user *base = (void __user *)(unsigned long)p;
Asias He3ab2e422013-04-27 11:16:48 +08001639 vq = d->vqs[i];
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001640 mutex_lock(&vq->mutex);
1641 /* If ring is inactive, will check when it's enabled. */
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001642 if (vq->private_data && !vq_log_access_ok(vq, base))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001643 r = -EFAULT;
1644 else
1645 vq->log_base = base;
1646 mutex_unlock(&vq->mutex);
1647 }
1648 break;
1649 case VHOST_SET_LOG_FD:
1650 r = get_user(fd, (int __user *)argp);
1651 if (r < 0)
1652 break;
Eric Biggersd25cc432018-01-06 14:52:21 -08001653 ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
1654 if (IS_ERR(ctx)) {
1655 r = PTR_ERR(ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001656 break;
1657 }
Eric Biggersd25cc432018-01-06 14:52:21 -08001658 swap(ctx, d->log_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001659 for (i = 0; i < d->nvqs; ++i) {
Asias He3ab2e422013-04-27 11:16:48 +08001660 mutex_lock(&d->vqs[i]->mutex);
1661 d->vqs[i]->log_ctx = d->log_ctx;
1662 mutex_unlock(&d->vqs[i]->mutex);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001663 }
1664 if (ctx)
1665 eventfd_ctx_put(ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001666 break;
1667 default:
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001668 r = -ENOIOCTLCMD;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001669 break;
1670 }
1671done:
1672 return r;
1673}
Asias He6ac1afb2013-05-06 16:38:21 +08001674EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001675
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001676/* TODO: This is really inefficient. We need something like get_user()
1677 * (instruction directly accesses the data, with an exception table entry
1678 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
1679 */
1680static int set_bit_to_user(int nr, void __user *addr)
1681{
1682 unsigned long log = (unsigned long)addr;
1683 struct page *page;
1684 void *base;
1685 int bit = nr + (log % PAGE_SIZE) * 8;
1686 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301687
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001688 r = get_user_pages_fast(log, 1, 1, &page);
Michael S. Tsirkind6db3f52010-02-23 11:25:23 +02001689 if (r < 0)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001690 return r;
Michael S. Tsirkind6db3f52010-02-23 11:25:23 +02001691 BUG_ON(r != 1);
Cong Wangc6daa7f2011-11-25 23:14:26 +08001692 base = kmap_atomic(page);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001693 set_bit(bit, base);
Cong Wangc6daa7f2011-11-25 23:14:26 +08001694 kunmap_atomic(base);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001695 set_page_dirty_lock(page);
1696 put_page(page);
1697 return 0;
1698}
1699
1700static int log_write(void __user *log_base,
1701 u64 write_address, u64 write_length)
1702{
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001703 u64 write_page = write_address / VHOST_PAGE_SIZE;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001704 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301705
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001706 if (!write_length)
1707 return 0;
Michael S. Tsirkin3bf9be42010-11-29 10:19:07 +02001708 write_length += write_address % VHOST_PAGE_SIZE;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001709 for (;;) {
1710 u64 base = (u64)(unsigned long)log_base;
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001711 u64 log = base + write_page / 8;
1712 int bit = write_page % 8;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001713 if ((u64)(unsigned long)log != log)
1714 return -EFAULT;
1715 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1716 if (r < 0)
1717 return r;
1718 if (write_length <= VHOST_PAGE_SIZE)
1719 break;
1720 write_length -= VHOST_PAGE_SIZE;
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001721 write_page += 1;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001722 }
1723 return r;
1724}
1725
1726int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1727 unsigned int log_num, u64 len)
1728{
1729 int i, r;
1730
1731 /* Make sure data written is seen before log. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00001732 smp_wmb();
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001733 for (i = 0; i < log_num; ++i) {
1734 u64 l = min(log[i].len, len);
1735 r = log_write(vq->log_base, log[i].addr, l);
1736 if (r < 0)
1737 return r;
1738 len -= l;
Michael S. Tsirkin5786aee2010-09-22 12:31:53 +02001739 if (!len) {
1740 if (vq->log_ctx)
1741 eventfd_signal(vq->log_ctx, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001742 return 0;
Michael S. Tsirkin5786aee2010-09-22 12:31:53 +02001743 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001744 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001745 /* Length written exceeds what we have stored. This is a bug. */
1746 BUG();
1747 return 0;
1748}
Asias He6ac1afb2013-05-06 16:38:21 +08001749EXPORT_SYMBOL_GPL(vhost_log_write);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001750
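/*
 * Editor's sketch (not part of this file): log_write() above sets one bit
 * per VHOST_PAGE_SIZE (4 KiB) page of guest physical memory in the bitmap
 * userspace registered with VHOST_SET_LOG_BASE. A migration loop might
 * test a page like this; log_base is an assumed mapping and little-endian
 * bit numbering is assumed, with atomics and ordering ignored for brevity.
 */
#if 0	/* userspace example, kept out of the kernel build */
static int guest_page_is_dirty(const unsigned char *log_base,
			       unsigned long long gpa)
{
	unsigned long long page = gpa / 4096;	/* matches VHOST_PAGE_SIZE */

	return (log_base[page / 8] >> (page % 8)) & 1;
}
#endif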
Jason Wang2723fea2011-06-21 18:04:38 +08001751static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1752{
1753 void __user *used;
Jason Wangbfe2bc52016-06-23 02:04:30 -04001754 if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1755 &vq->used->flags) < 0)
Jason Wang2723fea2011-06-21 18:04:38 +08001756 return -EFAULT;
1757 if (unlikely(vq->log_used)) {
1758 /* Make sure the flag is seen before log. */
1759 smp_wmb();
1760 /* Log used flag write. */
1761 used = &vq->used->flags;
1762 log_write(vq->log_base, vq->log_addr +
1763 (used - (void __user *)vq->used),
1764 sizeof vq->used->flags);
1765 if (vq->log_ctx)
1766 eventfd_signal(vq->log_ctx, 1);
1767 }
1768 return 0;
1769}
1770
1771static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1772{
Jason Wangbfe2bc52016-06-23 02:04:30 -04001773 if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1774 vhost_avail_event(vq)))
Jason Wang2723fea2011-06-21 18:04:38 +08001775 return -EFAULT;
1776 if (unlikely(vq->log_used)) {
1777 void __user *used;
1778 /* Make sure the event is seen before log. */
1779 smp_wmb();
1780 /* Log avail event write */
1781 used = vhost_avail_event(vq);
1782 log_write(vq->log_base, vq->log_addr +
1783 (used - (void __user *)vq->used),
1784 sizeof *vhost_avail_event(vq));
1785 if (vq->log_ctx)
1786 eventfd_signal(vq->log_ctx, 1);
1787 }
1788 return 0;
1789}
1790
Greg Kurz80f7d032016-02-16 15:59:44 +01001791int vhost_vq_init_access(struct vhost_virtqueue *vq)
Jason Wang2723fea2011-06-21 18:04:38 +08001792{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001793 __virtio16 last_used_idx;
Jason Wang2723fea2011-06-21 18:04:38 +08001794 int r;
Greg Kurze1f33be2016-02-16 15:54:28 +01001795 bool is_le = vq->is_le;
1796
Halil Pasiccda8bba2017-01-30 11:09:36 +01001797 if (!vq->private_data)
Jason Wang2723fea2011-06-21 18:04:38 +08001798 return 0;
Greg Kurz2751c982015-04-24 14:27:24 +02001799
1800 vhost_init_is_le(vq);
Jason Wang2723fea2011-06-21 18:04:38 +08001801
1802 r = vhost_update_used_flags(vq);
1803 if (r)
Greg Kurze1f33be2016-02-16 15:54:28 +01001804 goto err;
Jason Wang2723fea2011-06-21 18:04:38 +08001805 vq->signalled_used_valid = false;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001806 if (!vq->iotlb &&
1807 !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
Greg Kurze1f33be2016-02-16 15:54:28 +01001808 r = -EFAULT;
1809 goto err;
1810 }
Jason Wangf8894912017-02-28 17:56:02 +08001811 r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001812 if (r) {
1813 vq_err(vq, "Can't access used idx at %p\n",
1814 &vq->used->idx);
Greg Kurze1f33be2016-02-16 15:54:28 +01001815 goto err;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001816 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001817 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
Michael S. Tsirkin64f7f052014-12-01 17:39:39 +02001818 return 0;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001819
Greg Kurze1f33be2016-02-16 15:54:28 +01001820err:
1821 vq->is_le = is_le;
1822 return r;
Jason Wang2723fea2011-06-21 18:04:38 +08001823}
Greg Kurz80f7d032016-02-16 15:59:44 +01001824EXPORT_SYMBOL_GPL(vhost_vq_init_access);
Jason Wang2723fea2011-06-21 18:04:38 +08001825
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001826static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001827 struct iovec iov[], int iov_size, int access)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001828{
Jason Wanga9709d62016-06-23 02:04:31 -04001829 const struct vhost_umem_node *node;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001830 struct vhost_dev *dev = vq->dev;
1831 struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001832 struct iovec *_iov;
1833 u64 s = 0;
1834 int ret = 0;
1835
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001836 while ((u64)len > s) {
1837 u64 size;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001838 if (unlikely(ret >= iov_size)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001839 ret = -ENOBUFS;
1840 break;
1841 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001842
Jason Wanga9709d62016-06-23 02:04:31 -04001843 node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1844 addr, addr + len - 1);
1845 if (node == NULL || node->start > addr) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001846 if (umem != dev->iotlb) {
1847 ret = -EFAULT;
1848 break;
1849 }
1850 ret = -EAGAIN;
1851 break;
1852 } else if (!(node->perm & access)) {
1853 ret = -EPERM;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001854 break;
1855 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001856
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001857 _iov = iov + ret;
Jason Wanga9709d62016-06-23 02:04:31 -04001858 size = node->size - addr + node->start;
Michael S. Tsirkinbd971202012-11-26 05:57:27 +00001859 _iov->iov_len = min((u64)len - s, size);
Christoph Hellwiga8d37822010-04-13 14:11:25 -04001860 _iov->iov_base = (void __user *)(unsigned long)
Jason Wanga9709d62016-06-23 02:04:31 -04001861 (node->userspace_addr + addr - node->start);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001862 s += size;
1863 addr += size;
1864 ++ret;
1865 }
1866
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001867 if (ret == -EAGAIN)
1868 vhost_iotlb_miss(vq, addr, access);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001869 return ret;
1870}
1871
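/*
 * Editor's note: a worked example of translate_desc() above, with
 * hypothetical numbers. Assume two umem nodes, [0x0000, 0x7fff] mapped at
 * userspace address 0x100000 and [0x8000, 0xffff] mapped at 0x200000.
 * Translating addr = 0x7f00, len = 0x200 then produces two iovecs:
 *
 *	iov[0] = { .iov_base = (void *)0x107f00, .iov_len = 0x100 }
 *	iov[1] = { .iov_base = (void *)0x200000, .iov_len = 0x100 }
 *
 * i.e. the tail of the first node followed by the head of the second.
 */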
1872/* Each buffer in the virtqueues is actually a chain of descriptors. This
1873 * function returns the next descriptor in the chain,
1874 * or -1U if we're at the end. */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001875static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001876{
1877 unsigned int next;
1878
1879 /* If this descriptor says it doesn't chain, we're done. */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001880 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001881 return -1U;
1882
1883 /* Check they're not leading us off end of descriptors. */
Paul E. McKenney3a5db0b2017-11-27 09:45:10 -08001884 next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001885 return next;
1886}
1887
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001888static int get_indirect(struct vhost_virtqueue *vq,
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001889 struct iovec iov[], unsigned int iov_size,
1890 unsigned int *out_num, unsigned int *in_num,
1891 struct vhost_log *log, unsigned int *log_num,
1892 struct vring_desc *indirect)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001893{
1894 struct vring_desc desc;
1895 unsigned int i = 0, count, found = 0;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001896 u32 len = vhost32_to_cpu(vq, indirect->len);
Al Viroaad9a1c2014-12-10 14:49:01 -05001897 struct iov_iter from;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001898 int ret, access;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001899
1900 /* Sanity check */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001901 if (unlikely(len % sizeof desc)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001902 vq_err(vq, "Invalid length in indirect descriptor: "
1903 "len 0x%llx not multiple of 0x%zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001904 (unsigned long long)len,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001905 sizeof desc);
1906 return -EINVAL;
1907 }
1908
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001909 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001910 UIO_MAXIOV, VHOST_ACCESS_RO);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001911 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001912 if (ret != -EAGAIN)
1913 vq_err(vq, "Translation failure %d in indirect.\n", ret);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001914 return ret;
1915 }
Al Viroaad9a1c2014-12-10 14:49:01 -05001916 iov_iter_init(&from, READ, vq->indirect, ret, len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001917
1918 /* We will use the result as an address to read from, so most
1919 * architectures only need a compiler barrier here. */
1920 read_barrier_depends();
1921
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001922 count = len / sizeof desc;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001923 /* Buffers are chained via a 16 bit next field, so
1924 * we can have at most 2^16 of these. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001925 if (unlikely(count > USHRT_MAX + 1)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001926 vq_err(vq, "Indirect buffer length too big: %d\n",
1927 indirect->len);
1928 return -E2BIG;
1929 }
1930
1931 do {
1932 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001933 if (unlikely(++found > count)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001934 vq_err(vq, "Loop detected: last one at %u "
1935 "indirect size %u\n",
1936 i, count);
1937 return -EINVAL;
1938 }
Al Virocbbd26b2016-11-01 22:09:04 -04001939 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001940 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001941 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001942 return -EINVAL;
1943 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001944 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001945 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001946 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001947 return -EINVAL;
1948 }
1949
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001950 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
1951 access = VHOST_ACCESS_WO;
1952 else
1953 access = VHOST_ACCESS_RO;
1954
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001955 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1956 vhost32_to_cpu(vq, desc.len), iov + iov_count,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001957 iov_size - iov_count, access);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001958 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001959 if (ret != -EAGAIN)
1960 vq_err(vq, "Translation failure %d indirect idx %d\n",
1961 ret, i);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001962 return ret;
1963 }
1964 /* If this is an input descriptor, increment that count. */
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001965 if (access == VHOST_ACCESS_WO) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001966 *in_num += ret;
1967 if (unlikely(log)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001968 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1969 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001970 ++*log_num;
1971 }
1972 } else {
1973 /* If it's an output descriptor, they're all supposed
1974 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001975 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001976 vq_err(vq, "Indirect descriptor "
1977 "has out after in: idx %d\n", i);
1978 return -EINVAL;
1979 }
1980 *out_num += ret;
1981 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001982 } while ((i = next_desc(vq, &desc)) != -1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001983 return 0;
1984}
1985
1986/* This looks in the virtqueue for the first available buffer, and converts
1987 * it to an iovec for convenient access. Since descriptors consist of some
1988 * number of output then some number of input descriptors, it's actually two
1989 * iovecs, but we pack them into one and note how many of each there were.
1990 *
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001991 * This function returns the descriptor number found, or vq->num (which is
1992 * never a valid descriptor number) if none was found. A negative code is
1993 * returned on error. */
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001994int vhost_get_vq_desc(struct vhost_virtqueue *vq,
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001995 struct iovec iov[], unsigned int iov_size,
1996 unsigned int *out_num, unsigned int *in_num,
1997 struct vhost_log *log, unsigned int *log_num)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001998{
1999 struct vring_desc desc;
2000 unsigned int i, head, found = 0;
2001 u16 last_avail_idx;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002002 __virtio16 avail_idx;
2003 __virtio16 ring_head;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002004 int ret, access;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002005
2006 /* Check it isn't doing very strange things with descriptor numbers. */
2007 last_avail_idx = vq->last_avail_idx;
Jason Wange3b56cd2017-02-07 15:49:50 +08002008
2009 if (vq->avail_idx == vq->last_avail_idx) {
Jason Wangf8894912017-02-28 17:56:02 +08002010 if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
Jason Wange3b56cd2017-02-07 15:49:50 +08002011 vq_err(vq, "Failed to access avail idx at %p\n",
2012 &vq->avail->idx);
2013 return -EFAULT;
2014 }
2015 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2016
2017 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2018 vq_err(vq, "Guest moved used index from %u to %u",
2019 last_avail_idx, vq->avail_idx);
2020 return -EFAULT;
2021 }
2022
2023 /* If there's nothing new since last we looked, return
2024 * invalid.
2025 */
2026 if (vq->avail_idx == last_avail_idx)
2027 return vq->num;
2028
2029 /* Only get avail ring entries after they have been
2030 * exposed by guest.
2031 */
2032 smp_rmb();
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002033 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002034
2035 /* Grab the next descriptor number they're advertising, and increment
2036 * the index we've seen. */
Jason Wangf8894912017-02-28 17:56:02 +08002037 if (unlikely(vhost_get_avail(vq, ring_head,
Jason Wangbfe2bc52016-06-23 02:04:30 -04002038 &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002039 vq_err(vq, "Failed to read head: idx %d address %p\n",
2040 last_avail_idx,
2041 &vq->avail->ring[last_avail_idx % vq->num]);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002042 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002043 }
2044
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002045 head = vhost16_to_cpu(vq, ring_head);
2046
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002047 /* If their number is silly, that's an error. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002048 if (unlikely(head >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002049 vq_err(vq, "Guest says index %u > %u is available",
2050 head, vq->num);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002051 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002052 }
2053
2054 /* When we start there are none of either input nor output. */
2055 *out_num = *in_num = 0;
2056 if (unlikely(log))
2057 *log_num = 0;
2058
2059 i = head;
2060 do {
2061 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002062 if (unlikely(i >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002063 vq_err(vq, "Desc index is %u > %u, head = %u",
2064 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002065 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002066 }
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002067 if (unlikely(++found > vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002068 vq_err(vq, "Loop detected: last one at %u "
2069 "vq size %u head %u\n",
2070 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002071 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002072 }
Jason Wangbfe2bc52016-06-23 02:04:30 -04002073 ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
2074 sizeof desc);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002075 if (unlikely(ret)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002076 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2077 i, vq->desc + i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002078 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002079 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002080 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002081 ret = get_indirect(vq, iov, iov_size,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002082 out_num, in_num,
2083 log, log_num, &desc);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002084 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002085 if (ret != -EAGAIN)
2086 vq_err(vq, "Failure detected "
2087 "in indirect descriptor at idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002088 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002089 }
2090 continue;
2091 }
2092
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002093 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2094 access = VHOST_ACCESS_WO;
2095 else
2096 access = VHOST_ACCESS_RO;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002097 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2098 vhost32_to_cpu(vq, desc.len), iov + iov_count,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002099 iov_size - iov_count, access);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002100 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002101 if (ret != -EAGAIN)
2102 vq_err(vq, "Translation failure %d descriptor idx %d\n",
2103 ret, i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002104 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002105 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002106 if (access == VHOST_ACCESS_WO) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002107 /* If this is an input descriptor,
2108 * increment that count. */
2109 *in_num += ret;
2110 if (unlikely(log)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002111 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2112 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002113 ++*log_num;
2114 }
2115 } else {
2116 /* If it's an output descriptor, they're all supposed
2117 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002118 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002119 vq_err(vq, "Descriptor has out after in: "
2120 "idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002121 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002122 }
2123 *out_num += ret;
2124 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002125 } while ((i = next_desc(vq, &desc)) != -1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002126
2127 /* On success, increment avail index. */
2128 vq->last_avail_idx++;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002129
 2130	 /* Assume notifications from the guest are disabled at this point;
 2131	 * if they aren't, we would need to update the avail_event index. */
2132 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002133 return head;
2134}
Asias He6ac1afb2013-05-06 16:38:21 +08002135EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002136
2137/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
David Stevens8dd014a2010-07-27 18:52:21 +03002138void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002139{
David Stevens8dd014a2010-07-27 18:52:21 +03002140 vq->last_avail_idx -= n;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002141}
Asias He6ac1afb2013-05-06 16:38:21 +08002142EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002143
2144/* After we've used one of their buffers, we tell them about it. We'll then
2145 * want to notify the guest, using eventfd. */
2146int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2147{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002148 struct vring_used_elem heads = {
2149 cpu_to_vhost32(vq, head),
2150 cpu_to_vhost32(vq, len)
2151 };
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002152
Jason Wangc49e4e52013-09-02 16:40:58 +08002153 return vhost_add_used_n(vq, &heads, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002154}
Asias He6ac1afb2013-05-06 16:38:21 +08002155EXPORT_SYMBOL_GPL(vhost_add_used);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002156
David Stevens8dd014a2010-07-27 18:52:21 +03002157static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2158 struct vring_used_elem *heads,
2159 unsigned count)
2160{
2161 struct vring_used_elem __user *used;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002162 u16 old, new;
David Stevens8dd014a2010-07-27 18:52:21 +03002163 int start;
2164
Michael S. Tsirkin5fba13b2015-11-29 13:34:44 +02002165 start = vq->last_used_idx & (vq->num - 1);
David Stevens8dd014a2010-07-27 18:52:21 +03002166 used = vq->used->ring + start;
Jason Wangc49e4e52013-09-02 16:40:58 +08002167 if (count == 1) {
Jason Wangbfe2bc52016-06-23 02:04:30 -04002168 if (vhost_put_user(vq, heads[0].id, &used->id)) {
Jason Wangc49e4e52013-09-02 16:40:58 +08002169 vq_err(vq, "Failed to write used id");
2170 return -EFAULT;
2171 }
Jason Wangbfe2bc52016-06-23 02:04:30 -04002172 if (vhost_put_user(vq, heads[0].len, &used->len)) {
Jason Wangc49e4e52013-09-02 16:40:58 +08002173 vq_err(vq, "Failed to write used len");
2174 return -EFAULT;
2175 }
Jason Wangbfe2bc52016-06-23 02:04:30 -04002176 } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
David Stevens8dd014a2010-07-27 18:52:21 +03002177 vq_err(vq, "Failed to write used");
2178 return -EFAULT;
2179 }
2180 if (unlikely(vq->log_used)) {
2181 /* Make sure data is seen before log. */
2182 smp_wmb();
2183 /* Log used ring entry write. */
2184 log_write(vq->log_base,
2185 vq->log_addr +
2186 ((void __user *)used - (void __user *)vq->used),
2187 count * sizeof *used);
2188 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002189 old = vq->last_used_idx;
2190 new = (vq->last_used_idx += count);
2191 /* If the driver never bothers to signal in a very long while,
 2192	 * the used index might wrap around. If that happens, invalidate
 2193	 * the signalled_used index we stored. TODO: make sure the driver
2194 * signals at least once in 2^16 and remove this. */
2195 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2196 vq->signalled_used_valid = false;
David Stevens8dd014a2010-07-27 18:52:21 +03002197 return 0;
2198}
2199
2200/* After we've used one of their buffers, we tell them about it. We'll then
2201 * want to notify the guest, using eventfd. */
2202int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2203 unsigned count)
2204{
2205 int start, n, r;
2206
Michael S. Tsirkin5fba13b2015-11-29 13:34:44 +02002207 start = vq->last_used_idx & (vq->num - 1);
David Stevens8dd014a2010-07-27 18:52:21 +03002208 n = vq->num - start;
2209 if (n < count) {
2210 r = __vhost_add_used_n(vq, heads, n);
2211 if (r < 0)
2212 return r;
2213 heads += n;
2214 count -= n;
2215 }
2216 r = __vhost_add_used_n(vq, heads, count);
2217
2218 /* Make sure buffer is written before we update index. */
2219 smp_wmb();
Jason Wangbfe2bc52016-06-23 02:04:30 -04002220 if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
2221 &vq->used->idx)) {
David Stevens8dd014a2010-07-27 18:52:21 +03002222 vq_err(vq, "Failed to increment used idx");
2223 return -EFAULT;
2224 }
2225 if (unlikely(vq->log_used)) {
2226 /* Log used index update. */
2227 log_write(vq->log_base,
2228 vq->log_addr + offsetof(struct vring_used, idx),
2229 sizeof vq->used->idx);
2230 if (vq->log_ctx)
2231 eventfd_signal(vq->log_ctx, 1);
2232 }
2233 return r;
2234}
Asias He6ac1afb2013-05-06 16:38:21 +08002235EXPORT_SYMBOL_GPL(vhost_add_used_n);
David Stevens8dd014a2010-07-27 18:52:21 +03002236
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002237static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002238{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002239 __u16 old, new;
2240 __virtio16 event;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002241 bool v;
Jason Wang8d658432017-07-27 11:22:05 +08002242 /* Flush out used index updates. This is paired
2243 * with the barrier that the Guest executes when enabling
2244 * interrupts. */
2245 smp_mb();
Michael S. Tsirkin0d499352010-05-11 19:44:17 +03002246
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002247 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002248 unlikely(vq->avail_idx == vq->last_avail_idx))
2249 return true;
2250
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002251 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002252 __virtio16 flags;
Jason Wangf8894912017-02-28 17:56:02 +08002253 if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002254 vq_err(vq, "Failed to get flags");
2255 return true;
2256 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002257 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002258 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002259 old = vq->signalled_used;
2260 v = vq->signalled_used_valid;
2261 new = vq->signalled_used = vq->last_used_idx;
2262 vq->signalled_used_valid = true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002263
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002264 if (unlikely(!v))
2265 return true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002266
Jason Wangf8894912017-02-28 17:56:02 +08002267 if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002268 vq_err(vq, "Failed to get used event idx");
2269 return true;
2270 }
Jason Wang8d658432017-07-27 11:22:05 +08002271 return vring_need_event(vhost16_to_cpu(vq, event), new, old);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002272}
2273
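/*
 * Editor's note on vhost_notify() above: vring_need_event() is the
 * VIRTIO_RING_F_EVENT_IDX helper from include/uapi/linux/virtio_ring.h and
 * boils down to
 *
 *	(u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * i.e. signal only when the used index has just moved past the event index
 * the guest published (event_idx in [old, new) modulo 2^16). Worked example
 * with old = 4, new = 6: event_idx = 5 gives 0 < 2, so signal;
 * event_idx = 10 gives 65531 < 2, so no signal.
 */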
2274/* This actually signals the guest, using eventfd. */
2275void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2276{
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002277	/* Signal the Guest to tell them we used something up. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002278 if (vq->call_ctx && vhost_notify(dev, vq))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002279 eventfd_signal(vq->call_ctx, 1);
2280}
Asias He6ac1afb2013-05-06 16:38:21 +08002281EXPORT_SYMBOL_GPL(vhost_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002282
2283/* And here's the combo meal deal. Supersize me! */
2284void vhost_add_used_and_signal(struct vhost_dev *dev,
2285 struct vhost_virtqueue *vq,
2286 unsigned int head, int len)
2287{
2288 vhost_add_used(vq, head, len);
2289 vhost_signal(dev, vq);
2290}
Asias He6ac1afb2013-05-06 16:38:21 +08002291EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002292
David Stevens8dd014a2010-07-27 18:52:21 +03002293/* multi-buffer version of vhost_add_used_and_signal */
2294void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2295 struct vhost_virtqueue *vq,
2296 struct vring_used_elem *heads, unsigned count)
2297{
2298 vhost_add_used_n(vq, heads, count);
2299 vhost_signal(dev, vq);
2300}
Asias He6ac1afb2013-05-06 16:38:21 +08002301EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
David Stevens8dd014a2010-07-27 18:52:21 +03002302
Jason Wangd4a60602016-03-04 06:24:52 -05002303/* return true if we're sure that the available ring is empty */
2304bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2305{
2306 __virtio16 avail_idx;
2307 int r;
2308
Jason Wang275bf962017-01-18 15:02:01 +08002309 if (vq->avail_idx != vq->last_avail_idx)
Jason Wangd4a60602016-03-04 06:24:52 -05002310 return false;
2311
Linus Torvalds54d79892017-03-02 13:53:13 -08002312 r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
Jason Wang275bf962017-01-18 15:02:01 +08002313 if (unlikely(r))
2314 return false;
2315 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2316
2317 return vq->avail_idx == vq->last_avail_idx;
Jason Wangd4a60602016-03-04 06:24:52 -05002318}
2319EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2320
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002321/* OK, now we need to know about added descriptors. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002322bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002323{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002324 __virtio16 avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002325 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05302326
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002327 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2328 return false;
2329 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002330 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08002331 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002332 if (r) {
2333 vq_err(vq, "Failed to enable notification at %p: %d\n",
2334 &vq->used->flags, r);
2335 return false;
2336 }
2337 } else {
Jason Wang2723fea2011-06-21 18:04:38 +08002338 r = vhost_update_avail_event(vq, vq->avail_idx);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002339 if (r) {
2340 vq_err(vq, "Failed to update avail event index at %p: %d\n",
2341 vhost_avail_event(vq), r);
2342 return false;
2343 }
2344 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002345 /* They could have slipped one in as we were doing that: make
2346 * sure it's written, then check again. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00002347 smp_mb();
Jason Wangf8894912017-02-28 17:56:02 +08002348 r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002349 if (r) {
2350 vq_err(vq, "Failed to check avail idx at %p: %d\n",
2351 &vq->avail->idx, r);
2352 return false;
2353 }
2354
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002355 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002356}
Asias He6ac1afb2013-05-06 16:38:21 +08002357EXPORT_SYMBOL_GPL(vhost_enable_notify);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002358
2359/* We don't need to be notified again. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002360void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002361{
2362 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05302363
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002364 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2365 return;
2366 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002367 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08002368 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002369 if (r)
2370 vq_err(vq, "Failed to enable notification at %p: %d\n",
2371 &vq->used->flags, r);
2372 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002373}
Asias He6ac1afb2013-05-06 16:38:21 +08002374EXPORT_SYMBOL_GPL(vhost_disable_notify);
2375
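/*
 * Editor's sketch (not part of this file): the typical shape of a backend's
 * kick handler built on the helpers above, in the spirit of vhost-net and
 * vhost-test. my_handle_kick() and the empty "consume" step are
 * hypothetical; a real backend does device-specific work with the iovecs
 * and handles -EAGAIN from IOTLB misses.
 */
#if 0	/* illustrative only, not built */
static void my_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	unsigned out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq->dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* error (or IOTLB miss) */
		if (head == vq->num) {		/* ring empty: re-arm, recheck */
			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
				vhost_disable_notify(vq->dev, vq);
				continue;
			}
			break;
		}
		/* ... consume vq->iov[0 .. out + in) here ... */
		vhost_add_used_and_signal(vq->dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}
#endif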
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002376/* Create a new message. */
2377struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2378{
2379 struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2380 if (!node)
2381 return NULL;
Michael S. Tsirkin670ae9c2018-05-12 00:33:10 +03002382
2383 /* Make sure all padding within the structure is initialized. */
2384 memset(&node->msg, 0, sizeof node->msg);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002385 node->vq = vq;
2386 node->msg.type = type;
2387 return node;
2388}
2389EXPORT_SYMBOL_GPL(vhost_new_msg);
2390
2391void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2392 struct vhost_msg_node *node)
2393{
2394 spin_lock(&dev->iotlb_lock);
2395 list_add_tail(&node->node, head);
2396 spin_unlock(&dev->iotlb_lock);
2397
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002398 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002399}
2400EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2401
2402struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2403 struct list_head *head)
2404{
2405 struct vhost_msg_node *node = NULL;
2406
2407 spin_lock(&dev->iotlb_lock);
2408 if (!list_empty(head)) {
2409 node = list_first_entry(head, struct vhost_msg_node,
2410 node);
2411 list_del(&node->node);
2412 }
2413 spin_unlock(&dev->iotlb_lock);
2414
2415 return node;
2416}
2417EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2418
2419
Asias He6ac1afb2013-05-06 16:38:21 +08002420static int __init vhost_init(void)
2421{
2422 return 0;
2423}
2424
2425static void __exit vhost_exit(void)
2426{
2427}
2428
2429module_init(vhost_init);
2430module_exit(vhost_exit);
2431
2432MODULE_VERSION("0.0.1");
2433MODULE_LICENSE("GPL v2");
2434MODULE_AUTHOR("Michael S. Tsirkin");
2435MODULE_DESCRIPTION("Host kernel accelerator for virtio");