// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
        "Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
        "Maximum number of iotlb entries. (default: 2048)");

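/*
 * Both limits are read-only (0444) module parameters: they can be
 * inspected under /sys/module/vhost/parameters/ but only changed at
 * load time, e.g. (illustrative): modprobe vhost max_mem_regions=128
 * max_iotlb_entries=4096.
 */
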
enum {
        VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
        vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
        vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
        vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        struct vhost_vring_state s;

        if (vq->private_data)
                return -EBUSY;

        if (copy_from_user(&s, argp, sizeof(s)))
                return -EFAULT;

        if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
            s.num != VHOST_VRING_BIG_ENDIAN)
                return -EINVAL;

        if (s.num == VHOST_VRING_BIG_ENDIAN)
                vhost_enable_cross_endian_big(vq);
        else
                vhost_enable_cross_endian_little(vq);

        return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        struct vhost_vring_state s = {
                .index = idx,
                .num = vq->user_be
        };

        if (copy_to_user(argp, &s, sizeof(s)))
                return -EFAULT;

        return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        /* Note for legacy virtio: user_be is initialized at reset time
         * according to the host endianness. If userspace does not set an
         * explicit endianness, the default behavior is native endian, as
         * expected by legacy virtio.
         */
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
                || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

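/*
 * How vq->is_le ends up set (sketch of the logic above):
 *
 *      VIRTIO_F_VERSION_1 negotiated          -> is_le = true
 *      legacy device, no explicit endianness  -> is_le = host native order
 *      legacy + VHOST_SET_VRING_ENDIAN        -> is_le = !user_be
 *
 * The last case is only reachable with CONFIG_VHOST_CROSS_ENDIAN_LEGACY.
 */
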
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
        vhost_init_is_le(vq);
}

struct vhost_flush_struct {
        struct vhost_work work;
        struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
        struct vhost_flush_struct *s;

        s = container_of(work, struct vhost_flush_struct, work);
        complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
{
        struct vhost_poll *poll;

        poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                             void *key)
{
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
        struct vhost_work *work = &poll->work;

        if (!(key_to_poll(key) & poll->mask))
                return 0;

        if (!poll->dev->use_worker)
                work->fn(work);
        else
                vhost_poll_queue(poll);

        return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
        clear_bit(VHOST_WORK_QUEUED, &work->flags);
        work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev)
{
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
        poll->wqh = NULL;

        vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

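/*
 * Typical backend usage (sketch, modeled on drivers/vhost/net.c): a
 * device wires a handler to socket readiness with something like
 *
 *      vhost_poll_init(&poll, handle_tx_net, EPOLLOUT, &n->dev);
 *      vhost_poll_start(&poll, sock->file);
 *
 * after which EPOLLOUT wakeups queue the handler on the worker.
 */
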
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        __poll_t mask;

        if (poll->wqh)
                return 0;

        mask = vfs_poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
        if (mask & EPOLLERR) {
                vhost_poll_stop(poll);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        if (poll->wqh) {
                remove_wait_queue(poll->wqh, &poll->wait);
                poll->wqh = NULL;
        }
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
        struct vhost_flush_struct flush;

        if (dev->worker) {
                init_completion(&flush.wait_event);
                vhost_work_init(&flush.work, vhost_flush_work);

                vhost_work_queue(dev, &flush.work);
                wait_for_completion(&flush.wait_event);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
        vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
        if (!dev->worker)
                return;

        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
                 * test_and_set_bit() implies a memory barrier.
                 */
                llist_add(&work->node, &dev->work_list);
                wake_up_process(dev->worker);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
        return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
        vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
        int j;

        for (j = 0; j < VHOST_NUM_ADDRS; j++)
                vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
{
        vq->num = 1;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->private_data = NULL;
        vq->acked_features = 0;
        vq->acked_backend_features = 0;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->kick = NULL;
        vq->call_ctx = NULL;
        vq->log_ctx = NULL;
        vhost_reset_is_le(vq);
        vhost_disable_cross_endian(vq);
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
        __vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
        struct vhost_dev *dev = data;
        struct vhost_work *work, *work_next;
        struct llist_node *node;
        mm_segment_t oldfs = get_fs();

        set_fs(USER_DS);
        use_mm(dev->mm);

        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }

                node = llist_del_all(&dev->work_list);
                if (!node)
                        schedule();

                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                smp_wmb();
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);
                        kcov_remote_start_common(dev->kcov_handle);
                        work->fn(work);
                        kcov_remote_stop();
                        if (need_resched())
                                schedule();
                }
        }
        unuse_mm(dev->mm);
        set_fs(oldfs);
        return 0;
}

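/*
 * Worker design note (sketch): vhost_work_queue() pushes onto a
 * lock-free llist, which is LIFO, so the loop above calls
 * llist_reverse_order() to dispatch works in FIFO order.
 * VHOST_WORK_QUEUED is cleared before work->fn() runs, so a work item
 * may legally be requeued from within its own handler.
 */
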
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
        kfree(vq->indirect);
        vq->indirect = NULL;
        kfree(vq->log);
        vq->log = NULL;
        kfree(vq->heads);
        vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
        struct vhost_virtqueue *vq;
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                                             GFP_KERNEL);
                vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
                                        GFP_KERNEL);
                vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
                                          GFP_KERNEL);
                if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
        }
        return 0;

err_nomem:
        for (; i >= 0; --i)
                vhost_vq_free_iovecs(dev->vqs[i]);
        return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i)
                vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
                          int pkts, int total_len)
{
        struct vhost_dev *dev = vq->dev;

        if ((dev->byte_weight && total_len >= dev->byte_weight) ||
            pkts >= dev->weight) {
                vhost_poll_queue(&vq->poll);
                return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);

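/*
 * Typical use in a kick handler (sketch): process buffers in a loop and
 * stop once the fairness budget is spent, e.g.
 *
 *      do {
 *              ... handle one buffer, add its length to total_len ...
 *      } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 *
 * Requeueing via vhost_poll_queue() yields the worker to other works
 * instead of monopolizing it.
 */
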
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
                                   unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->avail) +
               sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->used) +
               sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        return sizeof(*vq->desc) * num;
}

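/*
 * Worked example (split ring, num = 256, VIRTIO_RING_F_EVENT_IDX
 * negotiated): avail = 4 + 2 * 256 + 2 = 518 bytes,
 * used = 4 + 8 * 256 + 2 = 2054 bytes, desc = 16 * 256 = 4096 bytes.
 */
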
void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs,
                    int iov_limit, int weight, int byte_weight,
                    bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg))
{
        struct vhost_virtqueue *vq;
        int i;

        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
        dev->log_ctx = NULL;
        dev->umem = NULL;
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
        dev->iov_limit = iov_limit;
        dev->weight = weight;
        dev->byte_weight = byte_weight;
        dev->use_worker = use_worker;
        dev->msg_handler = msg_handler;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->log = NULL;
                vq->indirect = NULL;
                vq->heads = NULL;
                vq->dev = dev;
                mutex_init(&vq->mutex);
                vhost_vq_reset(dev, vq);
                if (vq->handle_kick)
                        vhost_poll_init(&vq->poll, vq->handle_kick,
                                        EPOLLIN, dev);
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

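/*
 * Example call (sketch, following drivers/vhost/net.c): a backend with
 * a worker thread and no private message handler initializes as
 *
 *      vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
 *                     UIO_MAXIOV + VHOST_NET_BATCH,
 *                     VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
 *                     true, NULL);
 */
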
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
        struct vhost_work work;
        struct task_struct *owner;
        int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
        struct vhost_attach_cgroups_struct *s;

        s = container_of(work, struct vhost_attach_cgroups_struct, work);
        s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
        struct vhost_attach_cgroups_struct attach;

        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
        vhost_work_flush(dev, &attach.work);
        return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
        return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
        /* No owner, become one */
        if (dev->use_worker) {
                dev->mm = get_task_mm(current);
        } else {
                /* A vDPA device does not use a worker thread, so there
                 * is no need to hold the address space for the mm. This
                 * helps avoid a deadlock in the mmap() case, which may
                 * hold the refcnt of the file and depend on the release
                 * method to remove the vma.
                 */
                dev->mm = current->mm;
                mmgrab(dev->mm);
        }
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
        if (!dev->mm)
                return;

        if (dev->use_worker)
                mmput(dev->mm);
        else
                mmdrop(dev->mm);

        dev->mm = NULL;
}

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
        struct task_struct *worker;
        int err;

        /* Is there an owner already? */
        if (vhost_dev_has_owner(dev)) {
                err = -EBUSY;
                goto err_mm;
        }

        vhost_attach_mm(dev);

        dev->kcov_handle = kcov_common_handle();
        if (dev->use_worker) {
                worker = kthread_create(vhost_worker, dev,
                                        "vhost-%d", current->pid);
                if (IS_ERR(worker)) {
                        err = PTR_ERR(worker);
                        goto err_worker;
                }

                dev->worker = worker;
                wake_up_process(worker); /* avoid contributing to loadavg */

                err = vhost_attach_cgroups(dev);
                if (err)
                        goto err_cgroup;
        }

        err = vhost_dev_alloc_iovecs(dev);
        if (err)
                goto err_cgroup;

        return 0;
err_cgroup:
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
        }
err_worker:
        vhost_detach_mm(dev);
        dev->kcov_handle = 0;
err_mm:
        return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
        return vhost_iotlb_alloc(max_iotlb_entries,
                                 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
        return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
        int i;

        vhost_dev_cleanup(dev);

        dev->umem = umem;
        /* We don't need VQ locks below since vhost_dev_cleanup makes sure
         * VQs aren't running.
         */
        for (i = 0; i < dev->nvqs; ++i)
                dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
                        vhost_poll_stop(&dev->vqs[i]->poll);
                        vhost_poll_flush(&dev->vqs[i]->poll);
                }
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_clear_msg(struct vhost_dev *dev)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&dev->iotlb_lock);

        list_for_each_entry_safe(node, n, &dev->read_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        list_for_each_entry_safe(node, n, &dev->pending_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->error_ctx)
                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
                if (dev->vqs[i]->kick)
                        fput(dev->vqs[i]->kick);
                if (dev->vqs[i]->call_ctx)
                        eventfd_ctx_put(dev->vqs[i]->call_ctx);
                vhost_vq_reset(dev, dev->vqs[i]);
        }
        vhost_dev_free_iovecs(dev);
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
        /* No one will access memory at this point */
        vhost_iotlb_free(dev->umem);
        dev->umem = NULL;
        vhost_iotlb_free(dev->iotlb);
        dev->iotlb = NULL;
        vhost_clear_msg(dev);
        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
        WARN_ON(!llist_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
                dev->kcov_handle = 0;
        }
        vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
        u64 a = addr / VHOST_PAGE_SIZE / 8;

        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
                return false;

        return access_ok(log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

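/*
 * Worked example (sketch): the dirty log is a userspace bitmap with one
 * bit per VHOST_PAGE_SIZE (4 KiB) page. For addr = 1 GiB, the bit sits
 * at byte offset 2^30 / 4096 / 8 = 32768 from log_base, and logging
 * sz = 1 MiB of guest memory needs (2^20 + 32767) / 32768 = 32 bytes
 * of bitmap.
 */
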
static bool vhost_overflow(u64 uaddr, u64 size)
{
        /* Make sure 64 bit math will not overflow. */
        return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
                                int log_all)
{
        struct vhost_iotlb_map *map;

        if (!umem)
                return false;

        list_for_each_entry(map, &umem->list, link) {
                unsigned long a = map->addr;

                if (vhost_overflow(map->addr, map->size))
                        return false;

                if (!access_ok((void __user *)a, map->size))
                        return false;
                else if (log_all && !log_access_ok(log_base,
                                                   map->start,
                                                   map->size))
                        return false;
        }
        return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
                                               u64 addr, unsigned int size,
                                               int type)
{
        const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

        if (!map)
                return NULL;

        return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
                             int log_all)
{
        int i;

        for (i = 0; i < d->nvqs; ++i) {
                bool ok;
                bool log;

                mutex_lock(&d->vqs[i]->mutex);
                log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i]->private_data)
                        ok = vq_memory_access_ok(d->vqs[i]->log_base,
                                                 umem, log);
                else
                        ok = true;
                mutex_unlock(&d->vqs[i]->mutex);
                if (!ok)
                        return false;
        }
        return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
                              const void *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_to_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that the whole vq
                 * can be accessed through the iotlb. So -EAGAIN should
                 * not happen in this case.
                 */
                struct iov_iter t;
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)to, size,
                                     VHOST_ADDR_USED);

                if (uaddr)
                        return __copy_to_user(uaddr, from, size);

                ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_WO);
                if (ret < 0)
                        goto out;
                iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
                ret = copy_to_iter(from, size, &t);
                if (ret == size)
                        ret = 0;
        }
out:
        return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
                                void __user *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_from_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that the vq
                 * can be accessed through the iotlb. So -EAGAIN should
                 * not happen in this case.
                 */
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)from, size,
                                     VHOST_ADDR_DESC);
                struct iov_iter f;

                if (uaddr)
                        return __copy_from_user(to, uaddr, size);

                ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_RO);
                if (ret < 0) {
                        vq_err(vq, "IOTLB translation failure: uaddr "
                               "%p size 0x%llx\n", from,
                               (unsigned long long) size);
                        goto out;
                }
                iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
                ret = copy_from_iter(to, size, &f);
                if (ret == size)
                        ret = 0;
        }

out:
        return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
                                          void __user *addr, unsigned int size,
                                          int type)
{
        int ret;

        ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
                             ARRAY_SIZE(vq->iotlb_iov),
                             VHOST_ACCESS_RO);
        if (ret < 0) {
                vq_err(vq, "IOTLB translation failure: uaddr "
                       "%p size 0x%llx\n", addr,
                       (unsigned long long) size);
                return NULL;
        }

        if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
                vq_err(vq, "Non atomic userspace memory access: uaddr "
                       "%p size 0x%llx\n", addr,
                       (unsigned long long) size);
                return NULL;
        }

        return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * can be accessed through the iotlb. So -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
                                            void __user *addr, unsigned int size,
                                            int type)
{
        void __user *uaddr = vhost_vq_meta_fetch(vq,
                             (u64)(uintptr_t)addr, size, type);
        if (uaddr)
                return uaddr;

        return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr) \
({ \
        int ret = -EFAULT; \
        if (!vq->iotlb) { \
                ret = __put_user(x, ptr); \
        } else { \
                __typeof__(ptr) to = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                          sizeof(*ptr), VHOST_ADDR_USED); \
                if (to != NULL) \
                        ret = __put_user(x, to); \
                else \
                        ret = -EFAULT; \
        } \
        ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
                              vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
                                 struct vring_used_elem *head, int idx,
                                 int count)
{
        return vhost_copy_to_user(vq, vq->used->ring + idx, head,
                                  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
                              &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
                              &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type) \
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __get_user(x, ptr); \
        } else { \
                __typeof__(ptr) from = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                                           sizeof(*ptr), \
                                                           type); \
                if (from != NULL) \
                        ret = __get_user(x, from); \
                else \
                        ret = -EFAULT; \
        } \
        ret; \
})

#define vhost_get_avail(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

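/*
 * Access-path note (sketch): without an IOTLB these accessors collapse
 * to plain __get_user()/__put_user() on the user ring. With an IOTLB,
 * the cached meta_iotlb translation for the desc/avail/used area is
 * tried first, and only a miss falls back to the full translate_desc()
 * walk in __vhost_get_user_slow().
 */
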
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
        int i = 0;
        for (i = 0; i < d->nvqs; ++i)
                mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
        int i = 0;
        for (i = 0; i < d->nvqs; ++i)
                mutex_unlock(&d->vqs[i]->mutex);
}

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
                                      __virtio16 *idx)
{
        return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
                                       __virtio16 *head, int idx)
{
        return vhost_get_avail(vq, *head,
                               &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
                                        __virtio16 *flags)
{
        return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
                                       __virtio16 *event)
{
        return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
                                     __virtio16 *idx)
{
        return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
                                 struct vring_desc *desc, int idx)
{
        return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                                  struct vhost_iotlb_msg *msg)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&d->iotlb_lock);

        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
                        kfree(node);
                }
        }

        spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
        unsigned long a = uaddr;

        /* Make sure 64 bit math will not overflow. */
        if (vhost_overflow(uaddr, size))
                return false;

        if ((access & VHOST_ACCESS_RO) &&
            !access_ok((void __user *)a, size))
                return false;
        if ((access & VHOST_ACCESS_WO) &&
            !access_ok((void __user *)a, size))
                return false;
        return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *msg)
{
        int ret = 0;

        mutex_lock(&dev->mutex);
        vhost_dev_lock_vqs(dev);
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
                                          msg->iova + msg->size - 1,
                                          msg->uaddr, msg->perm)) {
                        ret = -ENOMEM;
                        break;
                }
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                vhost_iotlb_del_range(dev->iotlb, msg->iova,
                                      msg->iova + msg->size - 1);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);

        return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                             struct iov_iter *from)
{
        struct vhost_iotlb_msg msg;
        size_t offset;
        int type, ret;

        ret = copy_from_iter(&type, sizeof(type), from);
        if (ret != sizeof(type)) {
                ret = -EINVAL;
                goto done;
        }

        switch (type) {
        case VHOST_IOTLB_MSG:
                /* There may be a hole after type for V1 message type,
                 * so skip it here.
                 */
                offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
                break;
        case VHOST_IOTLB_MSG_V2:
                offset = sizeof(__u32);
                break;
        default:
                ret = -EINVAL;
                goto done;
        }

        iov_iter_advance(from, offset);
        ret = copy_from_iter(&msg, sizeof(msg), from);
        if (ret != sizeof(msg)) {
                ret = -EINVAL;
                goto done;
        }

        if (dev->msg_handler)
                ret = dev->msg_handler(dev, &msg);
        else
                ret = vhost_process_iotlb_msg(dev, &msg);
        if (ret) {
                ret = -EFAULT;
                goto done;
        }

        ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
              sizeof(struct vhost_msg_v2);
done:
        return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
                        poll_table *wait)
{
        __poll_t mask = 0;

        poll_wait(file, &dev->wait, wait);

        if (!list_empty(&dev->read_list))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
                            int noblock)
{
        DEFINE_WAIT(wait);
        struct vhost_msg_node *node;
        ssize_t ret = 0;
        unsigned size = sizeof(struct vhost_msg);

        if (iov_iter_count(to) < size)
                return 0;

        while (1) {
                if (!noblock)
                        prepare_to_wait(&dev->wait, &wait,
                                        TASK_INTERRUPTIBLE);

                node = vhost_dequeue_msg(dev, &dev->read_list);
                if (node)
                        break;
                if (noblock) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                if (!dev->iotlb) {
                        ret = -EBADFD;
                        break;
                }

                schedule();
        }

        if (!noblock)
                finish_wait(&dev->wait, &wait);

        if (node) {
                struct vhost_iotlb_msg *msg;
                void *start = &node->msg;

                switch (node->msg.type) {
                case VHOST_IOTLB_MSG:
                        size = sizeof(node->msg);
                        msg = &node->msg.iotlb;
                        break;
                case VHOST_IOTLB_MSG_V2:
                        size = sizeof(node->msg_v2);
                        msg = &node->msg_v2.iotlb;
                        break;
                default:
                        BUG();
                        break;
                }

                ret = copy_to_iter(start, size, to);
                if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                        kfree(node);
                        return ret;
                }
                vhost_enqueue_msg(dev, &dev->pending_list, node);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
        struct vhost_dev *dev = vq->dev;
        struct vhost_msg_node *node;
        struct vhost_iotlb_msg *msg;
        bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

        node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
        if (!node)
                return -ENOMEM;

        if (v2) {
                node->msg_v2.type = VHOST_IOTLB_MSG_V2;
                msg = &node->msg_v2.iotlb;
        } else {
                msg = &node->msg.iotlb;
        }

        msg->type = VHOST_IOTLB_MISS;
        msg->iova = iova;
        msg->perm = access;

        vhost_enqueue_msg(dev, &dev->read_list, node);

        return 0;
}

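/*
 * Miss round trip (sketch): when a translation is absent,
 * vhost_iotlb_miss() queues a VHOST_IOTLB_MISS node on read_list and
 * wakes any reader; userspace read()s the miss, resolves the mapping,
 * and write()s a VHOST_IOTLB_UPDATE back, which vhost_iotlb_notify_vq()
 * turns into a vhost_poll_queue() so the stalled handler is retried.
 */
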
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
                         vring_desc_t __user *desc,
                         vring_avail_t __user *avail,
                         vring_used_t __user *used)
{
        return access_ok(desc, vhost_get_desc_size(vq, num)) &&
               access_ok(avail, vhost_get_avail_size(vq, num)) &&
               access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
                                 const struct vhost_iotlb_map *map,
                                 int type)
{
        int access = (type == VHOST_ADDR_USED) ?
                     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

        if (likely(map->perm & access))
                vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
                            int access, u64 addr, u64 len, int type)
{
        const struct vhost_iotlb_map *map;
        struct vhost_iotlb *umem = vq->iotlb;
        u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

        if (vhost_vq_meta_fetch(vq, addr, len, type))
                return true;

        while (len > s) {
                map = vhost_iotlb_itree_first(umem, addr, last);
                if (map == NULL || map->start > addr) {
                        vhost_iotlb_miss(vq, addr, access);
                        return false;
                } else if (!(map->perm & access)) {
                        /* Report the possible access violation by
                         * requesting another translation from userspace.
                         */
                        return false;
                }

                size = map->size - addr + map->start;

                if (orig_addr == addr && size >= len)
                        vhost_vq_meta_update(vq, map, type);

                s += size;
                addr += size;
        }

        return true;
}

Jason Wang9b5e8302019-05-24 04:12:15 -04001340int vq_meta_prefetch(struct vhost_virtqueue *vq)
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001341{
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001342 unsigned int num = vq->num;
1343
Michael S. Tsirkin3d2c7d32019-08-10 13:53:21 -04001344 if (!vq->iotlb)
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001345 return 1;
1346
Jason Wang0bbe3062020-03-26 22:01:19 +08001347 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
Jason Wang4942e822019-05-24 04:12:16 -04001348 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
Jason Wang0bbe3062020-03-26 22:01:19 +08001349 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
Jason Wang4942e822019-05-24 04:12:16 -04001350 vhost_get_avail_size(vq, num),
Jason Wangf8894912017-02-28 17:56:02 +08001351 VHOST_ADDR_AVAIL) &&
Jason Wang0bbe3062020-03-26 22:01:19 +08001352 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
Jason Wang4942e822019-05-24 04:12:16 -04001353 vhost_get_used_size(vq, num), VHOST_ADDR_USED);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001354}
Jason Wang9b5e8302019-05-24 04:12:15 -04001355EXPORT_SYMBOL_GPL(vq_meta_prefetch);
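/*
 * A minimal caller sketch (illustrative only): backends typically prefetch
 * ring metadata before draining a queue and bail out on a miss, retrying
 * once userspace has serviced the IOTLB fault:
 *
 *	if (!vq_meta_prefetch(vq))
 *		return;
 *
 * vhost_net follows this pattern at the top of its TX/RX handlers.
 */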
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001356
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001357/* Can we log writes? */
1358/* Caller should have device mutex but not vq mutex */
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001359bool vhost_log_access_ok(struct vhost_dev *dev)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001360{
Jason Wanga9709d62016-06-23 02:04:31 -04001361 return memory_access_ok(dev, dev->umem, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001362}
Asias He6ac1afb2013-05-06 16:38:21 +08001363EXPORT_SYMBOL_GPL(vhost_log_access_ok);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001364
1365/* Verify access for write logging. */
1366/* Caller should have vq mutex and device mutex */
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001367static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1368 void __user *log_base)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001369{
Jason Wanga9709d62016-06-23 02:04:31 -04001370 return vq_memory_access_ok(log_base, vq->umem,
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001371 vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001372 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
Jason Wang4942e822019-05-24 04:12:16 -04001373 vhost_get_used_size(vq, vq->num)));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001374}
1375
1376/* Can we start vq? */
1377/* Caller should have vq mutex and device mutex */
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001378bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001379{
Stefan Hajnoczid14d2b72018-04-11 10:35:40 +08001380 if (!vq_log_access_ok(vq, vq->log_base))
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001381 return false;
Jason Wangd65026c2018-03-29 16:00:04 +08001382
Stefan Hajnoczid14d2b72018-04-11 10:35:40 +08001383 /* Access validation occurs at prefetch time with IOTLB */
1384 if (vq->iotlb)
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001385 return true;
Jason Wangd65026c2018-03-29 16:00:04 +08001386
1387 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001388}
Asias He6ac1afb2013-05-06 16:38:21 +08001389EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001390
1391static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1392{
Jason Wanga9709d62016-06-23 02:04:31 -04001393 struct vhost_memory mem, *newmem;
1394 struct vhost_memory_region *region;
Jason Wang0bbe3062020-03-26 22:01:19 +08001395 struct vhost_iotlb *newumem, *oldumem;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001396 unsigned long size = offsetof(struct vhost_memory, regions);
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001397 int i;
Krishna Kumard47effe2011-03-01 17:06:37 +05301398
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001399 if (copy_from_user(&mem, m, size))
1400 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001401 if (mem.padding)
1402 return -EOPNOTSUPP;
Igor Mammedovc9ce42f2015-07-02 15:08:11 +02001403 if (mem.nregions > max_mem_regions)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001404 return -E2BIG;
Matthew Wilcoxb2303d72018-06-07 07:57:18 -07001405 newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1406 GFP_KERNEL);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001407 if (!newmem)
1408 return -ENOMEM;
1409
1410 memcpy(newmem, &mem, size);
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001411 if (copy_from_user(newmem->regions, m->regions,
1412 mem.nregions * sizeof *m->regions)) {
Igor Mammedovbcfeaca2015-06-16 18:33:35 +02001413 kvfree(newmem);
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001414 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001415 }
1416
Jason Wang0bbe3062020-03-26 22:01:19 +08001417 newumem = iotlb_alloc();
Jason Wanga9709d62016-06-23 02:04:31 -04001418 if (!newumem) {
Igor Mammedov4de72552015-07-01 11:07:09 +02001419 kvfree(newmem);
Jason Wanga9709d62016-06-23 02:04:31 -04001420 return -ENOMEM;
Takuya Yoshikawaa02c3782010-05-27 19:03:56 +09001421 }
Jason Wanga9709d62016-06-23 02:04:31 -04001422
Jason Wanga9709d62016-06-23 02:04:31 -04001423 for (region = newmem->regions;
1424 region < newmem->regions + mem.nregions;
1425 region++) {
Jason Wang0bbe3062020-03-26 22:01:19 +08001426 if (vhost_iotlb_add_range(newumem,
1427 region->guest_phys_addr,
1428 region->guest_phys_addr +
1429 region->memory_size - 1,
1430 region->userspace_addr,
1431 VHOST_MAP_RW))
Jason Wanga9709d62016-06-23 02:04:31 -04001432 goto err;
Jason Wanga9709d62016-06-23 02:04:31 -04001433 }
1434
1435 if (!memory_access_ok(d, newumem, 0))
1436 goto err;
1437
1438 oldumem = d->umem;
1439 d->umem = newumem;
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001440
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001441 /* All memory accesses are done under some VQ mutex. */
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001442 for (i = 0; i < d->nvqs; ++i) {
1443 mutex_lock(&d->vqs[i]->mutex);
Jason Wanga9709d62016-06-23 02:04:31 -04001444 d->vqs[i]->umem = newumem;
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001445 mutex_unlock(&d->vqs[i]->mutex);
1446 }
Jason Wanga9709d62016-06-23 02:04:31 -04001447
1448 kvfree(newmem);
Jason Wang0bbe3062020-03-26 22:01:19 +08001449 vhost_iotlb_free(oldumem);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001450 return 0;
Jason Wanga9709d62016-06-23 02:04:31 -04001451
1452err:
Jason Wang0bbe3062020-03-26 22:01:19 +08001453 vhost_iotlb_free(newumem);
Jason Wanga9709d62016-06-23 02:04:31 -04001454 kvfree(newmem);
1455 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001456}
1457
Jason Wangfeebcae2019-05-24 04:12:17 -04001458static long vhost_vring_set_num(struct vhost_dev *d,
1459 struct vhost_virtqueue *vq,
1460 void __user *argp)
1461{
1462 struct vhost_vring_state s;
1463
1464 /* Resizing ring with an active backend?
1465 * You don't want to do that. */
1466 if (vq->private_data)
1467 return -EBUSY;
1468
1469 if (copy_from_user(&s, argp, sizeof s))
1470 return -EFAULT;
1471
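	/* The ring size must be a nonzero power of two no larger than 32768:
	 * the split-ring layout assumes it, and the (num - 1) index masks
	 * used when filling the used ring rely on it. */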
1472 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1473 return -EINVAL;
1474 vq->num = s.num;
1475
1476 return 0;
1477}
1478
1479static long vhost_vring_set_addr(struct vhost_dev *d,
1480 struct vhost_virtqueue *vq,
1481 void __user *argp)
1482{
1483 struct vhost_vring_addr a;
1484
1485 if (copy_from_user(&a, argp, sizeof a))
1486 return -EFAULT;
1487 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1488 return -EOPNOTSUPP;
1489
1490 /* For 32-bit, verify that the top 32 bits of the user
1491 data are set to zero. */
1492 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1493 (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1494 (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1495 return -EFAULT;
1496
1497 /* Make sure it's safe to cast pointers to vring types. */
1498 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1499 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1500 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1501 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1502 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1503 return -EINVAL;
1504
1505 /* We only verify access here if a backend is configured.
1506 * If it is not, we skip the check, since the ring size might
1507 * not have been set up yet; we verify when the backend is configured. */
1508 if (vq->private_data) {
1509 if (!vq_access_ok(vq, vq->num,
1510 (void __user *)(unsigned long)a.desc_user_addr,
1511 (void __user *)(unsigned long)a.avail_user_addr,
1512 (void __user *)(unsigned long)a.used_user_addr))
1513 return -EINVAL;
1514
1515 /* Also validate log access for used ring if enabled. */
1516 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1517 !log_access_ok(vq->log_base, a.log_guest_addr,
1518 sizeof *vq->used +
1519 vq->num * sizeof *vq->used->ring))
1520 return -EINVAL;
1521 }
1522
1523 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1524 vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1525 vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1526 vq->log_addr = a.log_guest_addr;
1527 vq->used = (void __user *)(unsigned long)a.used_user_addr;
1528
1529 return 0;
1530}
1531
1532static long vhost_vring_set_num_addr(struct vhost_dev *d,
1533 struct vhost_virtqueue *vq,
1534 unsigned int ioctl,
1535 void __user *argp)
1536{
1537 long r;
1538
1539 mutex_lock(&vq->mutex);
1540
1541 switch (ioctl) {
1542 case VHOST_SET_VRING_NUM:
1543 r = vhost_vring_set_num(d, vq, argp);
1544 break;
1545 case VHOST_SET_VRING_ADDR:
1546 r = vhost_vring_set_addr(d, vq, argp);
1547 break;
1548 default:
1549 BUG();
1550 }
1551
1552 mutex_unlock(&vq->mutex);
1553
1554 return r;
1555}
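/*
 * VHOST_SET_VRING_NUM and VHOST_SET_VRING_ADDR are handled above, before
 * the eventfd bookkeeping in vhost_vring_ioctl() below: neither ioctl
 * touches file descriptors, so the vq mutex alone is enough.
 */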
Sonny Rao26b36602018-03-14 10:05:06 -07001556long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001557{
Al Virocecb46f2012-08-27 14:21:39 -04001558 struct file *eventfp, *filep = NULL;
1559 bool pollstart = false, pollstop = false;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001560 struct eventfd_ctx *ctx = NULL;
1561 u32 __user *idxp = argp;
1562 struct vhost_virtqueue *vq;
1563 struct vhost_vring_state s;
1564 struct vhost_vring_file f;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001565 u32 idx;
1566 long r;
1567
1568 r = get_user(idx, idxp);
1569 if (r < 0)
1570 return r;
Krishna Kumar0f3d9a12010-05-25 11:10:36 +05301571 if (idx >= d->nvqs)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001572 return -ENOBUFS;
1573
Jason Wangff002262018-10-30 14:10:49 +08001574 idx = array_index_nospec(idx, d->nvqs);
Asias He3ab2e422013-04-27 11:16:48 +08001575 vq = d->vqs[idx];
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001576
Jason Wangfeebcae2019-05-24 04:12:17 -04001577 if (ioctl == VHOST_SET_VRING_NUM ||
1578 ioctl == VHOST_SET_VRING_ADDR) {
1579 return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1580 }
1581
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001582 mutex_lock(&vq->mutex);
1583
1584 switch (ioctl) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001585 case VHOST_SET_VRING_BASE:
1586 /* Moving base with an active backend?
1587 * You don't want to do that. */
1588 if (vq->private_data) {
1589 r = -EBUSY;
1590 break;
1591 }
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001592 if (copy_from_user(&s, argp, sizeof s)) {
1593 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001594 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001595 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001596 if (s.num > 0xffff) {
1597 r = -EINVAL;
1598 break;
1599 }
Jason Wang8d658432017-07-27 11:22:05 +08001600 vq->last_avail_idx = s.num;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001601 /* Forget the cached index value. */
1602 vq->avail_idx = vq->last_avail_idx;
1603 break;
1604 case VHOST_GET_VRING_BASE:
1605 s.index = idx;
1606 s.num = vq->last_avail_idx;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001607 if (copy_to_user(argp, &s, sizeof s))
1608 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001609 break;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001610 case VHOST_SET_VRING_KICK:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001611 if (copy_from_user(&f, argp, sizeof f)) {
1612 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001613 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001614 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001615 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001616 if (IS_ERR(eventfp)) {
1617 r = PTR_ERR(eventfp);
1618 break;
1619 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001620 if (eventfp != vq->kick) {
Al Virocecb46f2012-08-27 14:21:39 -04001621 pollstop = (filep = vq->kick) != NULL;
1622 pollstart = (vq->kick = eventfp) != NULL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001623 } else
1624 filep = eventfp;
1625 break;
1626 case VHOST_SET_VRING_CALL:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001627 if (copy_from_user(&f, argp, sizeof f)) {
1628 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001629 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001630 }
Eric Biggerse050c7d2018-01-06 14:52:19 -08001631 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1632 if (IS_ERR(ctx)) {
1633 r = PTR_ERR(ctx);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001634 break;
1635 }
Eric Biggerse050c7d2018-01-06 14:52:19 -08001636 swap(ctx, vq->call_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001637 break;
1638 case VHOST_SET_VRING_ERR:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001639 if (copy_from_user(&f, argp, sizeof f)) {
1640 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001641 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001642 }
Eric Biggers09f332a2018-01-06 14:52:20 -08001643 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1644 if (IS_ERR(ctx)) {
1645 r = PTR_ERR(ctx);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001646 break;
1647 }
Eric Biggers09f332a2018-01-06 14:52:20 -08001648 swap(ctx, vq->error_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001649 break;
Greg Kurz2751c982015-04-24 14:27:24 +02001650 case VHOST_SET_VRING_ENDIAN:
1651 r = vhost_set_vring_endian(vq, argp);
1652 break;
1653 case VHOST_GET_VRING_ENDIAN:
1654 r = vhost_get_vring_endian(vq, idx, argp);
1655 break;
Jason Wang03088132016-03-04 06:24:53 -05001656 case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1657 if (copy_from_user(&s, argp, sizeof(s))) {
1658 r = -EFAULT;
1659 break;
1660 }
1661 vq->busyloop_timeout = s.num;
1662 break;
1663 case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1664 s.index = idx;
1665 s.num = vq->busyloop_timeout;
1666 if (copy_to_user(argp, &s, sizeof(s)))
1667 r = -EFAULT;
1668 break;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001669 default:
1670 r = -ENOIOCTLCMD;
1671 }
1672
1673 if (pollstop && vq->handle_kick)
1674 vhost_poll_stop(&vq->poll);
1675
Eric Biggerse050c7d2018-01-06 14:52:19 -08001676 if (!IS_ERR_OR_NULL(ctx))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001677 eventfd_ctx_put(ctx);
1678 if (filep)
1679 fput(filep);
1680
1681 if (pollstart && vq->handle_kick)
Jason Wang2b8b3282013-01-28 01:05:18 +00001682 r = vhost_poll_start(&vq->poll, vq->kick);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001683
1684 mutex_unlock(&vq->mutex);
1685
1686 if (pollstop && vq->handle_kick)
1687 vhost_poll_flush(&vq->poll);
1688 return r;
1689}
Asias He6ac1afb2013-05-06 16:38:21 +08001690EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001691
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001692int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1693{
Jason Wang0bbe3062020-03-26 22:01:19 +08001694 struct vhost_iotlb *niotlb, *oiotlb;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001695 int i;
1696
Jason Wang0bbe3062020-03-26 22:01:19 +08001697 niotlb = iotlb_alloc();
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001698 if (!niotlb)
1699 return -ENOMEM;
1700
1701 oiotlb = d->iotlb;
1702 d->iotlb = niotlb;
1703
1704 for (i = 0; i < d->nvqs; ++i) {
Jason Wangb13f9c62018-08-08 11:43:04 +08001705 struct vhost_virtqueue *vq = d->vqs[i];
1706
1707 mutex_lock(&vq->mutex);
1708 vq->iotlb = niotlb;
1709 __vhost_vq_meta_reset(vq);
1710 mutex_unlock(&vq->mutex);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001711 }
1712
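	/* Every vq now points at niotlb (switched under its mutex), so no
	 * worker can still be reading the old IOTLB and it is safe to free. */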
Jason Wang0bbe3062020-03-26 22:01:19 +08001713 vhost_iotlb_free(oiotlb);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001714
1715 return 0;
1716}
1717EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1718
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001719/* Caller must have device mutex */
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001720long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001721{
Eric Biggersd25cc432018-01-06 14:52:21 -08001722 struct eventfd_ctx *ctx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001723 u64 p;
1724 long r;
1725 int i, fd;
1726
1727 /* If you are not the owner, you can become one */
1728 if (ioctl == VHOST_SET_OWNER) {
1729 r = vhost_dev_set_owner(d);
1730 goto done;
1731 }
1732
1733 /* You must be the owner to do anything else */
1734 r = vhost_dev_check_owner(d);
1735 if (r)
1736 goto done;
1737
1738 switch (ioctl) {
1739 case VHOST_SET_MEM_TABLE:
1740 r = vhost_set_memory(d, argp);
1741 break;
1742 case VHOST_SET_LOG_BASE:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001743 if (copy_from_user(&p, argp, sizeof p)) {
1744 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001745 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001746 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001747 if ((u64)(unsigned long)p != p) {
1748 r = -EFAULT;
1749 break;
1750 }
1751 for (i = 0; i < d->nvqs; ++i) {
1752 struct vhost_virtqueue *vq;
1753 void __user *base = (void __user *)(unsigned long)p;
Asias He3ab2e422013-04-27 11:16:48 +08001754 vq = d->vqs[i];
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001755 mutex_lock(&vq->mutex);
1756 /* If ring is inactive, will check when it's enabled. */
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001757 if (vq->private_data && !vq_log_access_ok(vq, base))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001758 r = -EFAULT;
1759 else
1760 vq->log_base = base;
1761 mutex_unlock(&vq->mutex);
1762 }
1763 break;
1764 case VHOST_SET_LOG_FD:
1765 r = get_user(fd, (int __user *)argp);
1766 if (r < 0)
1767 break;
Eric Biggersd25cc432018-01-06 14:52:21 -08001768 ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
1769 if (IS_ERR(ctx)) {
1770 r = PTR_ERR(ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001771 break;
1772 }
Eric Biggersd25cc432018-01-06 14:52:21 -08001773 swap(ctx, d->log_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001774 for (i = 0; i < d->nvqs; ++i) {
Asias He3ab2e422013-04-27 11:16:48 +08001775 mutex_lock(&d->vqs[i]->mutex);
1776 d->vqs[i]->log_ctx = d->log_ctx;
1777 mutex_unlock(&d->vqs[i]->mutex);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001778 }
1779 if (ctx)
1780 eventfd_ctx_put(ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001781 break;
1782 default:
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001783 r = -ENOIOCTLCMD;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001784 break;
1785 }
1786done:
1787 return r;
1788}
Asias He6ac1afb2013-05-06 16:38:21 +08001789EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001790
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001791/* TODO: This is really inefficient. We need something like get_user()
1792 * (instruction directly accesses the data, with an exception table entry
Mauro Carvalho Chehabcb1aaeb2019-06-07 15:54:32 -03001793 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001794 */
1795static int set_bit_to_user(int nr, void __user *addr)
1796{
1797 unsigned long log = (unsigned long)addr;
1798 struct page *page;
1799 void *base;
1800 int bit = nr + (log % PAGE_SIZE) * 8;
1801 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301802
Ira Weiny73b01402019-05-13 17:17:11 -07001803 r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
Michael S. Tsirkind6db3f52010-02-23 11:25:23 +02001804 if (r < 0)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001805 return r;
Michael S. Tsirkind6db3f52010-02-23 11:25:23 +02001806 BUG_ON(r != 1);
Cong Wangc6daa7f2011-11-25 23:14:26 +08001807 base = kmap_atomic(page);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001808 set_bit(bit, base);
Cong Wangc6daa7f2011-11-25 23:14:26 +08001809 kunmap_atomic(base);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001810 set_page_dirty_lock(page);
1811 put_page(page);
1812 return 0;
1813}
1814
1815static int log_write(void __user *log_base,
1816 u64 write_address, u64 write_length)
1817{
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001818 u64 write_page = write_address / VHOST_PAGE_SIZE;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001819 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301820
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001821 if (!write_length)
1822 return 0;
Michael S. Tsirkin3bf9be42010-11-29 10:19:07 +02001823 write_length += write_address % VHOST_PAGE_SIZE;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001824 for (;;) {
1825 u64 base = (u64)(unsigned long)log_base;
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001826 u64 log = base + write_page / 8;
1827 int bit = write_page % 8;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001828 if ((u64)(unsigned long)log != log)
1829 return -EFAULT;
1830 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1831 if (r < 0)
1832 return r;
1833 if (write_length <= VHOST_PAGE_SIZE)
1834 break;
1835 write_length -= VHOST_PAGE_SIZE;
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001836 write_page += 1;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001837 }
1838 return r;
1839}
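/*
 * Worked example (assuming VHOST_PAGE_SIZE == 4096): logging a write at
 * address 0x3000 of length 0x2000 touches pages 3 and 4, i.e. bits 3
 * and 4 of byte 0 of the userspace dirty bitmap.
 */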
1840
Jason Wangcc5e7102019-01-16 16:54:42 +08001841static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1842{
Jason Wang0bbe3062020-03-26 22:01:19 +08001843 struct vhost_iotlb *umem = vq->umem;
1844 struct vhost_iotlb_map *u;
Jason Wangcc5e7102019-01-16 16:54:42 +08001845 u64 start, end, l, min;
1846 int r;
1847 bool hit = false;
1848
1849 while (len) {
1850 min = len;
1851 /* More than one GPA can be mapped into a single HVA, so
1852 * iterate over all possible umem entries here to be safe.
1853 */
Jason Wang0bbe3062020-03-26 22:01:19 +08001854 list_for_each_entry(u, &umem->list, link) {
1855 if (u->addr > hva - 1 + len ||
1856 u->addr - 1 + u->size < hva)
Jason Wangcc5e7102019-01-16 16:54:42 +08001857 continue;
Jason Wang0bbe3062020-03-26 22:01:19 +08001858 start = max(u->addr, hva);
1859 end = min(u->addr - 1 + u->size, hva - 1 + len);
Jason Wangcc5e7102019-01-16 16:54:42 +08001860 l = end - start + 1;
1861 r = log_write(vq->log_base,
Jason Wang0bbe3062020-03-26 22:01:19 +08001862 u->start + start - u->addr,
Jason Wangcc5e7102019-01-16 16:54:42 +08001863 l);
1864 if (r < 0)
1865 return r;
1866 hit = true;
1867 min = min(l, min);
1868 }
1869
1870 if (!hit)
1871 return -EFAULT;
1872
1873 len -= min;
1874 hva += min;
1875 }
1876
1877 return 0;
1878}
1879
1880static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1881{
1882 struct iovec iov[64];
1883 int i, ret;
1884
1885 if (!vq->iotlb)
1886 return log_write(vq->log_base, vq->log_addr + used_offset, len);
1887
1888 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1889 len, iov, 64, VHOST_ACCESS_WO);
Jason Wang816db762019-02-19 14:53:44 +08001890 if (ret < 0)
Jason Wangcc5e7102019-01-16 16:54:42 +08001891 return ret;
1892
1893 for (i = 0; i < ret; i++) {
1894 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1895 iov[i].iov_len);
1896 if (ret)
1897 return ret;
1898 }
1899
1900 return 0;
1901}
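/*
 * With an IOTLB the used ring lives at a driver-supplied IOVA, so logging
 * a used-ring write is a two-step translation: translate_desc() resolves
 * the IOVA to host virtual addresses, and log_write_hva() maps each HVA
 * chunk back to guest-physical pages in the dirty log.
 */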
1902
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001903int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
Jason Wangcc5e7102019-01-16 16:54:42 +08001904 unsigned int log_num, u64 len, struct iovec *iov, int count)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001905{
1906 int i, r;
1907
1908 /* Make sure data written is seen before log. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00001909 smp_wmb();
Jason Wangcc5e7102019-01-16 16:54:42 +08001910
1911 if (vq->iotlb) {
1912 for (i = 0; i < count; i++) {
1913 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1914 iov[i].iov_len);
1915 if (r < 0)
1916 return r;
1917 }
1918 return 0;
1919 }
1920
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001921 for (i = 0; i < log_num; ++i) {
1922 u64 l = min(log[i].len, len);
1923 r = log_write(vq->log_base, log[i].addr, l);
1924 if (r < 0)
1925 return r;
1926 len -= l;
Michael S. Tsirkin5786aee2010-09-22 12:31:53 +02001927 if (!len) {
1928 if (vq->log_ctx)
1929 eventfd_signal(vq->log_ctx, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001930 return 0;
Michael S. Tsirkin5786aee2010-09-22 12:31:53 +02001931 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001932 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001933 /* Length written exceeds what we have stored. This is a bug. */
1934 BUG();
1935 return 0;
1936}
Asias He6ac1afb2013-05-06 16:38:21 +08001937EXPORT_SYMBOL_GPL(vhost_log_write);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001938
Jason Wang2723fea2011-06-21 18:04:38 +08001939static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1940{
1941 void __user *used;
Jason Wang7b5d7532019-05-24 04:12:14 -04001942 if (vhost_put_used_flags(vq))
Jason Wang2723fea2011-06-21 18:04:38 +08001943 return -EFAULT;
1944 if (unlikely(vq->log_used)) {
1945 /* Make sure the flag is seen before log. */
1946 smp_wmb();
1947 /* Log used flag write. */
1948 used = &vq->used->flags;
Jason Wangcc5e7102019-01-16 16:54:42 +08001949 log_used(vq, (used - (void __user *)vq->used),
1950 sizeof vq->used->flags);
Jason Wang2723fea2011-06-21 18:04:38 +08001951 if (vq->log_ctx)
1952 eventfd_signal(vq->log_ctx, 1);
1953 }
1954 return 0;
1955}
1956
1957static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1958{
Jason Wang7b5d7532019-05-24 04:12:14 -04001959 if (vhost_put_avail_event(vq))
Jason Wang2723fea2011-06-21 18:04:38 +08001960 return -EFAULT;
1961 if (unlikely(vq->log_used)) {
1962 void __user *used;
1963 /* Make sure the event is seen before log. */
1964 smp_wmb();
1965 /* Log avail event write */
1966 used = vhost_avail_event(vq);
Jason Wangcc5e7102019-01-16 16:54:42 +08001967 log_used(vq, (used - (void __user *)vq->used),
1968 sizeof *vhost_avail_event(vq));
Jason Wang2723fea2011-06-21 18:04:38 +08001969 if (vq->log_ctx)
1970 eventfd_signal(vq->log_ctx, 1);
1971 }
1972 return 0;
1973}
1974
Greg Kurz80f7d032016-02-16 15:59:44 +01001975int vhost_vq_init_access(struct vhost_virtqueue *vq)
Jason Wang2723fea2011-06-21 18:04:38 +08001976{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001977 __virtio16 last_used_idx;
Jason Wang2723fea2011-06-21 18:04:38 +08001978 int r;
Greg Kurze1f33be2016-02-16 15:54:28 +01001979 bool is_le = vq->is_le;
1980
Halil Pasiccda8bba2017-01-30 11:09:36 +01001981 if (!vq->private_data)
Jason Wang2723fea2011-06-21 18:04:38 +08001982 return 0;
Greg Kurz2751c982015-04-24 14:27:24 +02001983
1984 vhost_init_is_le(vq);
Jason Wang2723fea2011-06-21 18:04:38 +08001985
1986 r = vhost_update_used_flags(vq);
1987 if (r)
Greg Kurze1f33be2016-02-16 15:54:28 +01001988 goto err;
Jason Wang2723fea2011-06-21 18:04:38 +08001989 vq->signalled_used_valid = false;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001990 if (!vq->iotlb &&
Linus Torvalds96d4f262019-01-03 18:57:57 -08001991 !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
Greg Kurze1f33be2016-02-16 15:54:28 +01001992 r = -EFAULT;
1993 goto err;
1994 }
Jason Wang7b5d7532019-05-24 04:12:14 -04001995 r = vhost_get_used_idx(vq, &last_used_idx);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001996 if (r) {
1997 vq_err(vq, "Can't access used idx at %p\n",
1998 &vq->used->idx);
Greg Kurze1f33be2016-02-16 15:54:28 +01001999 goto err;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002000 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002001 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
Michael S. Tsirkin64f7f052014-12-01 17:39:39 +02002002 return 0;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002003
Greg Kurze1f33be2016-02-16 15:54:28 +01002004err:
2005 vq->is_le = is_le;
2006 return r;
Jason Wang2723fea2011-06-21 18:04:38 +08002007}
Greg Kurz80f7d032016-02-16 15:59:44 +01002008EXPORT_SYMBOL_GPL(vhost_vq_init_access);
Jason Wang2723fea2011-06-21 18:04:38 +08002009
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002010static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002011 struct iovec iov[], int iov_size, int access)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002012{
Jason Wang0bbe3062020-03-26 22:01:19 +08002013 const struct vhost_iotlb_map *map;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002014 struct vhost_dev *dev = vq->dev;
Jason Wang0bbe3062020-03-26 22:01:19 +08002015 struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002016 struct iovec *_iov;
2017 u64 s = 0;
2018 int ret = 0;
2019
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002020 while ((u64)len > s) {
2021 u64 size;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002022 if (unlikely(ret >= iov_size)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002023 ret = -ENOBUFS;
2024 break;
2025 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002026
Jason Wang0bbe3062020-03-26 22:01:19 +08002027 map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
2028 if (map == NULL || map->start > addr) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002029 if (umem != dev->iotlb) {
2030 ret = -EFAULT;
2031 break;
2032 }
2033 ret = -EAGAIN;
2034 break;
Jason Wang0bbe3062020-03-26 22:01:19 +08002035 } else if (!(map->perm & access)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002036 ret = -EPERM;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002037 break;
2038 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002039
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002040 _iov = iov + ret;
Jason Wang0bbe3062020-03-26 22:01:19 +08002041 size = map->size - addr + map->start;
Michael S. Tsirkinbd971202012-11-26 05:57:27 +00002042 _iov->iov_len = min((u64)len - s, size);
Michael S. Tsirkin0d4a3f22019-09-14 15:21:51 -04002043 _iov->iov_base = (void __user *)(unsigned long)
Jason Wang0bbe3062020-03-26 22:01:19 +08002044 (map->addr + addr - map->start);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002045 s += size;
2046 addr += size;
2047 ++ret;
2048 }
2049
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002050 if (ret == -EAGAIN)
2051 vhost_iotlb_miss(vq, addr, access);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002052 return ret;
2053}
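/*
 * Example (hypothetical layout): with one map covering [0x0000, 0x0fff]
 * and another covering [0x1000, 0x1fff], translating addr 0x0f00 with
 * len 0x200 yields two iovecs of 0x100 bytes each, one from each map.
 */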
2054
2055/* Each buffer in the virtqueues is actually a chain of descriptors. This
2056 * function returns the next descriptor in the chain,
2057 * or -1U if we're at the end. */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002058static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002059{
2060 unsigned int next;
2061
2062 /* If this descriptor says it doesn't chain, we're done. */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002063 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002064 return -1U;
2065
2066 /* Check they're not leading us off end of descriptors. */
Paul E. McKenney3a5db0b2017-11-27 09:45:10 -08002067 next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002068 return next;
2069}
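/*
 * READ_ONCE() above is defensive: it forces a single, ordered load of
 * desc->next so the value that gets bounds-checked by the caller is the
 * value actually used for the chain walk.
 */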
2070
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002071static int get_indirect(struct vhost_virtqueue *vq,
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002072 struct iovec iov[], unsigned int iov_size,
2073 unsigned int *out_num, unsigned int *in_num,
2074 struct vhost_log *log, unsigned int *log_num,
2075 struct vring_desc *indirect)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002076{
2077 struct vring_desc desc;
2078 unsigned int i = 0, count, found = 0;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002079 u32 len = vhost32_to_cpu(vq, indirect->len);
Al Viroaad9a1c2014-12-10 14:49:01 -05002080 struct iov_iter from;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002081 int ret, access;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002082
2083 /* Sanity check */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002084 if (unlikely(len % sizeof desc)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002085 vq_err(vq, "Invalid length in indirect descriptor: "
2086 "len 0x%llx not multiple of 0x%zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002087 (unsigned long long)len,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002088 sizeof desc);
2089 return -EINVAL;
2090 }
2091
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002092 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002093 UIO_MAXIOV, VHOST_ACCESS_RO);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002094 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002095 if (ret != -EAGAIN)
2096 vq_err(vq, "Translation failure %d in indirect.\n", ret);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002097 return ret;
2098 }
Al Viroaad9a1c2014-12-10 14:49:01 -05002099 iov_iter_init(&from, READ, vq->indirect, ret, len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002100
2101 /* We will use the result as an address to read from, so most
2102 * architectures only need a compiler barrier here. */
2103 read_barrier_depends();
2104
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002105 count = len / sizeof desc;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002106 /* Buffers are chained via a 16-bit next field, so
2107 * we can have at most 2^16 of these. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002108 if (unlikely(count > USHRT_MAX + 1)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002109 vq_err(vq, "Indirect buffer length too big: %d\n",
2110 indirect->len);
2111 return -E2BIG;
2112 }
2113
2114 do {
2115 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002116 if (unlikely(++found > count)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002117 vq_err(vq, "Loop detected: last one at %u "
2118 "indirect size %u\n",
2119 i, count);
2120 return -EINVAL;
2121 }
Al Virocbbd26b2016-11-01 22:09:04 -04002122 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002123 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002124 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002125 return -EINVAL;
2126 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002127 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002128 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002129 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002130 return -EINVAL;
2131 }
2132
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002133 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2134 access = VHOST_ACCESS_WO;
2135 else
2136 access = VHOST_ACCESS_RO;
2137
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002138 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2139 vhost32_to_cpu(vq, desc.len), iov + iov_count,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002140 iov_size - iov_count, access);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002141 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002142 if (ret != -EAGAIN)
2143 vq_err(vq, "Translation failure %d indirect idx %d\n",
2144 ret, i);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002145 return ret;
2146 }
2147 /* If this is an input descriptor, increment that count. */
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002148 if (access == VHOST_ACCESS_WO) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002149 *in_num += ret;
yongduan060423b2019-09-11 17:44:24 +08002150 if (unlikely(log && ret)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002151 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2152 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002153 ++*log_num;
2154 }
2155 } else {
2156 /* If it's an output descriptor, they're all supposed
2157 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002158 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002159 vq_err(vq, "Indirect descriptor "
2160 "has out after in: idx %d\n", i);
2161 return -EINVAL;
2162 }
2163 *out_num += ret;
2164 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002165 } while ((i = next_desc(vq, &desc)) != -1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002166 return 0;
2167}
2168
2169/* This looks in the virtqueue for the first available buffer, and converts
2170 * it to an iovec for convenient access. Since descriptors consist of some
2171 * number of output then some number of input descriptors, it's actually two
2172 * iovecs, but we pack them into one and note how many of each there were.
2173 *
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002174 * This function returns the descriptor number found, or vq->num (which is
2175 * never a valid descriptor number) if none was found. A negative code is
2176 * returned on error. */
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002177int vhost_get_vq_desc(struct vhost_virtqueue *vq,
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002178 struct iovec iov[], unsigned int iov_size,
2179 unsigned int *out_num, unsigned int *in_num,
2180 struct vhost_log *log, unsigned int *log_num)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002181{
2182 struct vring_desc desc;
2183 unsigned int i, head, found = 0;
2184 u16 last_avail_idx;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002185 __virtio16 avail_idx;
2186 __virtio16 ring_head;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002187 int ret, access;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002188
2189 /* Check it isn't doing very strange things with descriptor numbers. */
2190 last_avail_idx = vq->last_avail_idx;
Jason Wange3b56cd2017-02-07 15:49:50 +08002191
2192 if (vq->avail_idx == vq->last_avail_idx) {
Jason Wang7b5d7532019-05-24 04:12:14 -04002193 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
Jason Wange3b56cd2017-02-07 15:49:50 +08002194 vq_err(vq, "Failed to access avail idx at %p\n",
2195 &vq->avail->idx);
2196 return -EFAULT;
2197 }
2198 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2199
2200 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2201 vq_err(vq, "Guest moved avail index from %u to %u",
2202 last_avail_idx, vq->avail_idx);
2203 return -EFAULT;
2204 }
2205
2206 /* If there's nothing new since last we looked, return
2207 * invalid.
2208 */
2209 if (vq->avail_idx == last_avail_idx)
2210 return vq->num;
2211
2212 /* Only get avail ring entries after they have been
2213 * exposed by guest.
2214 */
2215 smp_rmb();
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002216 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002217
2218 /* Grab the next descriptor number they're advertising, and increment
2219 * the index we've seen. */
Jason Wang7b5d7532019-05-24 04:12:14 -04002220 if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002221 vq_err(vq, "Failed to read head: idx %d address %p\n",
2222 last_avail_idx,
2223 &vq->avail->ring[last_avail_idx % vq->num]);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002224 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002225 }
2226
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002227 head = vhost16_to_cpu(vq, ring_head);
2228
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002229 /* If their number is silly, that's an error. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002230 if (unlikely(head >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002231 vq_err(vq, "Guest says index %u > %u is available",
2232 head, vq->num);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002233 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002234 }
2235
2236 /* When we start there are none of either input nor output. */
2237 *out_num = *in_num = 0;
2238 if (unlikely(log))
2239 *log_num = 0;
2240
2241 i = head;
2242 do {
2243 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002244 if (unlikely(i >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002245 vq_err(vq, "Desc index is %u > %u, head = %u",
2246 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002247 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002248 }
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002249 if (unlikely(++found > vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002250 vq_err(vq, "Loop detected: last one at %u "
2251 "vq size %u head %u\n",
2252 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002253 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002254 }
Jason Wang7b5d7532019-05-24 04:12:14 -04002255 ret = vhost_get_desc(vq, &desc, i);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002256 if (unlikely(ret)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002257 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2258 i, vq->desc + i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002259 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002260 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002261 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002262 ret = get_indirect(vq, iov, iov_size,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002263 out_num, in_num,
2264 log, log_num, &desc);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002265 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002266 if (ret != -EAGAIN)
2267 vq_err(vq, "Failure detected "
2268 "in indirect descriptor at idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002269 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002270 }
2271 continue;
2272 }
2273
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002274 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2275 access = VHOST_ACCESS_WO;
2276 else
2277 access = VHOST_ACCESS_RO;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002278 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2279 vhost32_to_cpu(vq, desc.len), iov + iov_count,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002280 iov_size - iov_count, access);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002281 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002282 if (ret != -EAGAIN)
2283 vq_err(vq, "Translation failure %d descriptor idx %d\n",
2284 ret, i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002285 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002286 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002287 if (access == VHOST_ACCESS_WO) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002288 /* If this is an input descriptor,
2289 * increment that count. */
2290 *in_num += ret;
yongduan060423b2019-09-11 17:44:24 +08002291 if (unlikely(log && ret)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002292 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2293 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002294 ++*log_num;
2295 }
2296 } else {
2297 /* If it's an output descriptor, they're all supposed
2298 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002299 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002300 vq_err(vq, "Descriptor has out after in: "
2301 "idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002302 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002303 }
2304 *out_num += ret;
2305 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002306 } while ((i = next_desc(vq, &desc)) != -1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002307
2308 /* On success, increment avail index. */
2309 vq->last_avail_idx++;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002310
2311 /* Assume notifications from the guest are disabled at this point;
2312 * if they aren't, we would need to update the avail_event index. */
2313 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002314 return head;
2315}
Asias He6ac1afb2013-05-06 16:38:21 +08002316EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
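/*
 * A minimal consumer sketch (illustrative only; error handling elided):
 *
 *	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head == vq->num)
 *		break;		(ring empty: enable notify and recheck)
 *	... process vq->iov[0 .. out + in) ...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 */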
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002317
2318/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
David Stevens8dd014a2010-07-27 18:52:21 +03002319void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002320{
David Stevens8dd014a2010-07-27 18:52:21 +03002321 vq->last_avail_idx -= n;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002322}
Asias He6ac1afb2013-05-06 16:38:21 +08002323EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002324
2325/* After we've used one of their buffers, we tell them about it. We'll then
2326 * want to notify the guest, using eventfd. */
2327int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2328{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002329 struct vring_used_elem heads = {
2330 cpu_to_vhost32(vq, head),
2331 cpu_to_vhost32(vq, len)
2332 };
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002333
Jason Wangc49e4e52013-09-02 16:40:58 +08002334 return vhost_add_used_n(vq, &heads, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002335}
Asias He6ac1afb2013-05-06 16:38:21 +08002336EXPORT_SYMBOL_GPL(vhost_add_used);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002337
David Stevens8dd014a2010-07-27 18:52:21 +03002338static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2339 struct vring_used_elem *heads,
2340 unsigned count)
2341{
Michael S. Tsirkina865e422020-04-06 08:42:55 -04002342 vring_used_elem_t __user *used;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002343 u16 old, new;
David Stevens8dd014a2010-07-27 18:52:21 +03002344 int start;
2345
Michael S. Tsirkin5fba13b2015-11-29 13:34:44 +02002346 start = vq->last_used_idx & (vq->num - 1);
David Stevens8dd014a2010-07-27 18:52:21 +03002347 used = vq->used->ring + start;
Jason Wang7b5d7532019-05-24 04:12:14 -04002348 if (vhost_put_used(vq, heads, start, count)) {
David Stevens8dd014a2010-07-27 18:52:21 +03002349 vq_err(vq, "Failed to write used");
2350 return -EFAULT;
2351 }
2352 if (unlikely(vq->log_used)) {
2353 /* Make sure data is seen before log. */
2354 smp_wmb();
2355 /* Log used ring entry write. */
Jason Wangcc5e7102019-01-16 16:54:42 +08002356 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2357 count * sizeof *used);
David Stevens8dd014a2010-07-27 18:52:21 +03002358 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002359 old = vq->last_used_idx;
2360 new = (vq->last_used_idx += count);
2361 /* If the driver doesn't signal for a very long while, the
2362 * used index might wrap around. If that happens, invalidate
2363 * the signalled_used index we stored. TODO: make sure the driver
2364 * signals at least once per 2^16 used entries and remove this. */
2365 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2366 vq->signalled_used_valid = false;
David Stevens8dd014a2010-07-27 18:52:21 +03002367 return 0;
2368}
2369
2370/* After we've used one of their buffers, we tell them about it. We'll then
2371 * want to notify the guest, using eventfd. */
2372int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2373 unsigned count)
2374{
2375 int start, n, r;
2376
Michael S. Tsirkin5fba13b2015-11-29 13:34:44 +02002377 start = vq->last_used_idx & (vq->num - 1);
David Stevens8dd014a2010-07-27 18:52:21 +03002378 n = vq->num - start;
2379 if (n < count) {
2380 r = __vhost_add_used_n(vq, heads, n);
2381 if (r < 0)
2382 return r;
2383 heads += n;
2384 count -= n;
2385 }
2386 r = __vhost_add_used_n(vq, heads, count);
2387
2388 /* Make sure buffer is written before we update index. */
2389 smp_wmb();
Jason Wang7b5d7532019-05-24 04:12:14 -04002390 if (vhost_put_used_idx(vq)) {
David Stevens8dd014a2010-07-27 18:52:21 +03002391 vq_err(vq, "Failed to increment used idx");
2392 return -EFAULT;
2393 }
2394 if (unlikely(vq->log_used)) {
Jason Wang841df922018-12-13 10:53:37 +08002395 /* Make sure used idx is seen before log. */
2396 smp_wmb();
David Stevens8dd014a2010-07-27 18:52:21 +03002397 /* Log used index update. */
Jason Wangcc5e7102019-01-16 16:54:42 +08002398 log_used(vq, offsetof(struct vring_used, idx),
2399 sizeof vq->used->idx);
David Stevens8dd014a2010-07-27 18:52:21 +03002400 if (vq->log_ctx)
2401 eventfd_signal(vq->log_ctx, 1);
2402 }
2403 return r;
2404}
Asias He6ac1afb2013-05-06 16:38:21 +08002405EXPORT_SYMBOL_GPL(vhost_add_used_n);
David Stevens8dd014a2010-07-27 18:52:21 +03002406
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002407static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002408{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002409 __u16 old, new;
2410 __virtio16 event;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002411 bool v;
Jason Wang8d658432017-07-27 11:22:05 +08002412 /* Flush out used index updates. This is paired
2413 * with the barrier that the Guest executes when enabling
2414 * interrupts. */
2415 smp_mb();
Michael S. Tsirkin0d499352010-05-11 19:44:17 +03002416
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002417 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002418 unlikely(vq->avail_idx == vq->last_avail_idx))
2419 return true;
2420
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002421 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002422 __virtio16 flags;
Jason Wang7b5d7532019-05-24 04:12:14 -04002423 if (vhost_get_avail_flags(vq, &flags)) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002424 vq_err(vq, "Failed to get flags");
2425 return true;
2426 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002427 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002428 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002429 old = vq->signalled_used;
2430 v = vq->signalled_used_valid;
2431 new = vq->signalled_used = vq->last_used_idx;
2432 vq->signalled_used_valid = true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002433
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002434 if (unlikely(!v))
2435 return true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002436
Jason Wang7b5d7532019-05-24 04:12:14 -04002437 if (vhost_get_used_event(vq, &event)) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002438 vq_err(vq, "Failed to get used event idx");
2439 return true;
2440 }
Jason Wang8d658432017-07-27 11:22:05 +08002441 return vring_need_event(vhost16_to_cpu(vq, event), new, old);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002442}
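/*
 * vring_need_event(event, new, old) expands to
 * (u16)(new - event - 1) < (u16)(new - old): signal only if the event
 * index was crossed while the used index moved from old to new, all in
 * wrap-safe 16-bit arithmetic.
 */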
2443
2444/* This actually signals the guest, using eventfd. */
2445void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2446{
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002447 /* Signal the Guest to tell them we used something up. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002448 if (vq->call_ctx && vhost_notify(dev, vq))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002449 eventfd_signal(vq->call_ctx, 1);
2450}
Asias He6ac1afb2013-05-06 16:38:21 +08002451EXPORT_SYMBOL_GPL(vhost_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002452
2453/* And here's the combo meal deal. Supersize me! */
2454void vhost_add_used_and_signal(struct vhost_dev *dev,
2455 struct vhost_virtqueue *vq,
2456 unsigned int head, int len)
2457{
2458 vhost_add_used(vq, head, len);
2459 vhost_signal(dev, vq);
2460}
Asias He6ac1afb2013-05-06 16:38:21 +08002461EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002462
David Stevens8dd014a2010-07-27 18:52:21 +03002463/* multi-buffer version of vhost_add_used_and_signal */
2464void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2465 struct vhost_virtqueue *vq,
2466 struct vring_used_elem *heads, unsigned count)
2467{
2468 vhost_add_used_n(vq, heads, count);
2469 vhost_signal(dev, vq);
2470}
Asias He6ac1afb2013-05-06 16:38:21 +08002471EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
David Stevens8dd014a2010-07-27 18:52:21 +03002472
Jason Wangd4a60602016-03-04 06:24:52 -05002473/* Return true if we're sure that the available ring is empty */
2474bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2475{
2476 __virtio16 avail_idx;
2477 int r;
2478
Jason Wang275bf962017-01-18 15:02:01 +08002479 if (vq->avail_idx != vq->last_avail_idx)
Jason Wangd4a60602016-03-04 06:24:52 -05002480 return false;
2481
Jason Wang7b5d7532019-05-24 04:12:14 -04002482 r = vhost_get_avail_idx(vq, &avail_idx);
Jason Wang275bf962017-01-18 15:02:01 +08002483 if (unlikely(r))
2484 return false;
2485 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2486
2487 return vq->avail_idx == vq->last_avail_idx;
Jason Wangd4a60602016-03-04 06:24:52 -05002488}
2489EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2490
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002491/* OK, now we need to know about added descriptors. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002492bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002493{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002494 __virtio16 avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002495 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05302496
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002497 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2498 return false;
2499 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002500 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08002501 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002502 if (r) {
2503 vq_err(vq, "Failed to enable notification at %p: %d\n",
2504 &vq->used->flags, r);
2505 return false;
2506 }
2507 } else {
Jason Wang2723fea2011-06-21 18:04:38 +08002508 r = vhost_update_avail_event(vq, vq->avail_idx);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002509 if (r) {
2510 vq_err(vq, "Failed to update avail event index at %p: %d\n",
2511 vhost_avail_event(vq), r);
2512 return false;
2513 }
2514 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002515 /* They could have slipped one in as we were doing that: make
2516 * sure it's written, then check again. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00002517 smp_mb();
Jason Wang7b5d7532019-05-24 04:12:14 -04002518 r = vhost_get_avail_idx(vq, &avail_idx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002519 if (r) {
2520 vq_err(vq, "Failed to check avail idx at %p: %d\n",
2521 &vq->avail->idx, r);
2522 return false;
2523 }
2524
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002525 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002526}
Asias He6ac1afb2013-05-06 16:38:21 +08002527EXPORT_SYMBOL_GPL(vhost_enable_notify);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002528
2529/* We don't need to be notified again. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002530void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002531{
2532 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05302533
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002534 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2535 return;
2536 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002537 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08002538 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002539 if (r)
2540 vq_err(vq, "Failed to enable notification at %p: %d\n",
2541 &vq->used->flags, r);
2542 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002543}
Asias He6ac1afb2013-05-06 16:38:21 +08002544EXPORT_SYMBOL_GPL(vhost_disable_notify);
2545
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002546/* Create a new message. */
2547struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2548{
2549 struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2550 if (!node)
2551 return NULL;
Michael S. Tsirkin670ae9c2018-05-12 00:33:10 +03002552
2553 /* Make sure all padding within the structure is initialized. */
2554 memset(&node->msg, 0, sizeof node->msg);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002555 node->vq = vq;
2556 node->msg.type = type;
2557 return node;
2558}
2559EXPORT_SYMBOL_GPL(vhost_new_msg);
2560
2561void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2562 struct vhost_msg_node *node)
2563{
2564 spin_lock(&dev->iotlb_lock);
2565 list_add_tail(&node->node, head);
2566 spin_unlock(&dev->iotlb_lock);
2567
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002568 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002569}
2570EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2571
2572struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2573 struct list_head *head)
2574{
2575 struct vhost_msg_node *node = NULL;
2576
2577 spin_lock(&dev->iotlb_lock);
2578 if (!list_empty(head)) {
2579 node = list_first_entry(head, struct vhost_msg_node,
2580 node);
2581 list_del(&node->node);
2582 }
2583 spin_unlock(&dev->iotlb_lock);
2584
2585 return node;
2586}
2587EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
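/*
 * These msg-node helpers back the vhost chardev read/write path: IOTLB
 * miss messages are enqueued on dev->read_list for userspace to read,
 * and outstanding misses are parked on dev->pending_list until the
 * corresponding update arrives.
 */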
2588
2589
Asias He6ac1afb2013-05-06 16:38:21 +08002590static int __init vhost_init(void)
2591{
2592 return 0;
2593}
2594
2595static void __exit vhost_exit(void)
2596{
2597}
2598
2599module_init(vhost_init);
2600module_exit(vhost_exit);
2601
2602MODULE_VERSION("0.0.1");
2603MODULE_LICENSE("GPL v2");
2604MODULE_AUTHOR("Michael S. Tsirkin");
2605MODULE_DESCRIPTION("Host kernel accelerator for virtio");