// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
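
/*
 * Illustrative sketch (not part of this file's logic): with
 * CONFIG_VHOST_CROSS_ENDIAN_LEGACY enabled, userspace selects the ring
 * endianness per virtqueue before the backend is started, roughly:
 *
 *	struct vhost_vring_state s = {
 *		.index = 0,			// virtqueue index (hypothetical)
 *		.num = VHOST_VRING_BIG_ENDIAN,	// or VHOST_VRING_LITTLE_ENDIAN
 *	};
 *	if (ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s))
 *		perror("VHOST_SET_VRING_ENDIAN");
 *
 * vhost_set_vring_endian() above rejects this with -EBUSY once
 * private_data is set, i.e. once the ring is running.
 */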

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);
	else
		vhost_poll_queue(poll);

	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to the file's wait queue. The caller
 * must keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
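
/*
 * Usage sketch (illustrative only): a backend typically drives the poll
 * helpers above as follows, assuming a hypothetical handler handle_net_rx
 * and a struct file *sock_file it holds a reference to:
 *
 *	vhost_poll_init(&poll, handle_net_rx, EPOLLIN, dev);
 *	err = vhost_poll_start(&poll, sock_file);	// arm the waitqueue
 *	...
 *	vhost_poll_stop(&poll);				// disarm
 *	vhost_poll_flush(&poll);			// wait for pending work
 *
 * The sock_file reference must outlive vhost_poll_stop(), per the comment
 * on vhost_poll_start() above.
 */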

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
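
/*
 * Illustrative sketch (my_work_fn and struct my_backend are hypothetical):
 * deferring work to the per-device worker with the primitives above:
 *
 *	static void my_work_fn(struct vhost_work *work)
 *	{
 *		struct my_backend *b = container_of(work, struct my_backend,
 *						    work);
 *		// runs in the vhost worker kthread, which has adopted the
 *		// owner's mm (see vhost_worker() below)
 *	}
 *
 *	vhost_work_init(&b->work, my_work_fn);
 *	vhost_work_queue(&b->dev, &b->work);	// no-op if already queued
 *	vhost_work_flush(&b->dev, &b->work);	// wait until it has run
 */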

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
	call_ctx->ctx = NULL;
	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
	spin_lock_init(&call_ctx->ctx_lock);
}

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	vhost_vring_call_reset(&vq->call_ctx);
	__vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	kthread_use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			kcov_remote_start_common(dev->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			if (need_resched())
				schedule();
		}
	}
	kthread_unuse_mm(dev->mm);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
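
/*
 * Usage sketch (illustrative; process_one_desc() is hypothetical): a
 * handle_kick work function bounds how long it monopolizes the worker by
 * checking the weights on every iteration and letting
 * vhost_exceeds_weight() requeue the poll work once the budget is spent:
 *
 *	int pkts = 0, total_len = 0, len;
 *
 *	do {
 *		len = process_one_desc(vq);
 *		total_len += len;
 *	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 */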

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->avail) +
	       sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->used) +
	       sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}
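
/*
 * Worked example (for orientation only, assuming the standard split-ring
 * layout): with num = 256 and VIRTIO_RING_F_EVENT_IDX negotiated, the
 * avail area covered by vhost_get_avail_size() is
 * sizeof(*vq->avail) (flags + idx, 4 bytes) + 256 * 2 + 2 = 518 bytes,
 * the trailing 2 bytes being used_event as laid out by the macros at the
 * top of this file.
 */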

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->use_worker = use_worker;
	dev->msg_handler = msg_handler;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
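
/*
 * Illustrative sketch: a minimal backend wiring itself up through
 * vhost_dev_init(). The struct my_backend layout, handle_kick callback and
 * the limit values are hypothetical; real backends such as vhost-net pass
 * their own iov_limit/weight tuning here:
 *
 *	struct my_backend {
 *		struct vhost_dev dev;
 *		struct vhost_virtqueue vq;
 *	};
 *
 *	struct vhost_virtqueue *vqs[] = { &b->vq };
 *
 *	b->vq.handle_kick = handle_kick;
 *	vhost_dev_init(&b->dev, vqs, 1, UIO_MAXIOV, 64, 1 << 20,
 *		       true, NULL);	// use_worker = true, no msg_handler
 */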

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
	/* No owner, become one */
	if (dev->use_worker) {
		dev->mm = get_task_mm(current);
	} else {
		/* A vDPA device does not use a worker thread, so there's
		 * no need to hold the address space for mm. This helps
		 * to avoid deadlock in the case of mmap(), which may
		 * hold the refcnt of the file and depend on the release
		 * method to remove the vma.
		 */
		dev->mm = current->mm;
		mmgrab(dev->mm);
	}
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
	if (!dev->mm)
		return;

	if (dev->use_worker)
		mmput(dev->mm);
	else
		mmdrop(dev->mm);

	dev->mm = NULL;
}

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	vhost_attach_mm(dev);

	dev->kcov_handle = kcov_common_handle();
	if (dev->use_worker) {
		worker = kthread_create(vhost_worker, dev,
					"vhost-%d", current->pid);
		if (IS_ERR(worker)) {
			err = PTR_ERR(worker);
			goto err_worker;
		}

		dev->worker = worker;
		wake_up_process(worker); /* avoid contributing to loadavg */

		err = vhost_attach_cgroups(dev);
		if (err)
			goto err_cgroup;
	}

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
err_worker:
	vhost_detach_mm(dev);
	dev->kcov_handle = 0;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
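
/*
 * For orientation (a sketch of the expected call order, not enforced by
 * this function alone): userspace issues VHOST_SET_OWNER first on a
 * freshly opened vhost fd. Most later ioctls go through
 * vhost_dev_check_owner() and fail with -EPERM when called from a
 * different mm, and a second VHOST_SET_OWNER fails with -EBUSY via
 * vhost_dev_has_owner() above.
 */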

static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx.ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
		dev->kcov_handle = 0;
	}
	vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
				int log_all)
{
	struct vhost_iotlb_map *map;

	if (!umem)
		return false;

	list_for_each_entry(map, &umem->list, link) {
		unsigned long a = map->addr;

		if (vhost_overflow(map->addr, map->size))
			return false;

		if (!access_ok((void __user *)a, map->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   map->start,
						   map->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

	if (!map)
		return NULL;

	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * memory can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
			       from, (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
		       addr, (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr %p size 0x%llx\n",
		       addr, (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * memory can be accessed through the iotlb. So -EAGAIN
 * should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
				      __virtio16 *idx)
{
	return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;

		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There may be a hole after the type field for a V1
		 * message, so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
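
/*
 * Illustrative sketch: userspace answering an IOTLB miss by writing a V2
 * update message to the backend char device (e.g. /dev/vhost-net once
 * VHOST_BACKEND_F_IOTLB_MSG_V2 has been negotiated). The field values are
 * hypothetical:
 *
 *	struct vhost_msg_v2 m = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova  = miss_iova,
 *			.size  = region_size,
 *			.uaddr = (__u64)(uintptr_t)backing_va,
 *			.perm  = VHOST_ACCESS_RW,
 *			.type  = VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *	if (write(vhost_fd, &m, sizeof(m)) != sizeof(m))
 *		// rejected: bad perms/range (-EFAULT) or no iotlb set up
 */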

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 vring_desc_t __user *desc,
			 vring_avail_t __user *avail,
			 vring_used_t __user *used)
{
	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_iotlb_map *map,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(map->perm & access))
		vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_iotlb_map *map;
	struct vhost_iotlb *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(map->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = map->size - addr + map->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, map, type);

		s += size;
		addr += size;
	}

	return true;
}

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001359
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001360/* Can we log writes? */
1361/* Caller should have device mutex but not vq mutex */
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001362bool vhost_log_access_ok(struct vhost_dev *dev)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001363{
Jason Wanga9709d62016-06-23 02:04:31 -04001364 return memory_access_ok(dev, dev->umem, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001365}
Asias He6ac1afb2013-05-06 16:38:21 +08001366EXPORT_SYMBOL_GPL(vhost_log_access_ok);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001367
1368/* Verify access for write logging. */
1369/* Caller should have vq mutex and device mutex */
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001370static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1371 void __user *log_base)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001372{
Jason Wanga9709d62016-06-23 02:04:31 -04001373 return vq_memory_access_ok(log_base, vq->umem,
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001374 vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001375 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
Jason Wang4942e822019-05-24 04:12:16 -04001376 vhost_get_used_size(vq, vq->num)));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001377}
1378
1379/* Can we start vq? */
1380/* Caller should have vq mutex and device mutex */
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001381bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001382{
Stefan Hajnoczid14d2b72018-04-11 10:35:40 +08001383 if (!vq_log_access_ok(vq, vq->log_base))
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001384 return false;
Jason Wangd65026c2018-03-29 16:00:04 +08001385
Stefan Hajnoczid14d2b72018-04-11 10:35:40 +08001386 /* Access validation occurs at prefetch time with IOTLB */
1387 if (vq->iotlb)
Stefan Hajnocziddd3d402018-04-11 10:35:41 +08001388 return true;
Jason Wangd65026c2018-03-29 16:00:04 +08001389
1390 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001391}
Asias He6ac1afb2013-05-06 16:38:21 +08001392EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001393
1394static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1395{
Jason Wanga9709d62016-06-23 02:04:31 -04001396 struct vhost_memory mem, *newmem;
1397 struct vhost_memory_region *region;
Jason Wang0bbe3062020-03-26 22:01:19 +08001398 struct vhost_iotlb *newumem, *oldumem;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001399 unsigned long size = offsetof(struct vhost_memory, regions);
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001400 int i;
Krishna Kumard47effe2011-03-01 17:06:37 +05301401
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001402 if (copy_from_user(&mem, m, size))
1403 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001404 if (mem.padding)
1405 return -EOPNOTSUPP;
Igor Mammedovc9ce42f2015-07-02 15:08:11 +02001406 if (mem.nregions > max_mem_regions)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001407 return -E2BIG;
Matthew Wilcoxb2303d72018-06-07 07:57:18 -07001408 newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1409 GFP_KERNEL);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001410 if (!newmem)
1411 return -ENOMEM;
1412
1413 memcpy(newmem, &mem, size);
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001414 if (copy_from_user(newmem->regions, m->regions,
Gustavo A. R. Silvabf11d712020-07-31 08:09:56 -05001415 flex_array_size(newmem, regions, mem.nregions))) {
Igor Mammedovbcfeaca2015-06-16 18:33:35 +02001416 kvfree(newmem);
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001417 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001418 }
1419
Jason Wang0bbe3062020-03-26 22:01:19 +08001420 newumem = iotlb_alloc();
Jason Wanga9709d62016-06-23 02:04:31 -04001421 if (!newumem) {
Igor Mammedov4de72552015-07-01 11:07:09 +02001422 kvfree(newmem);
Jason Wanga9709d62016-06-23 02:04:31 -04001423 return -ENOMEM;
Takuya Yoshikawaa02c3782010-05-27 19:03:56 +09001424 }
Jason Wanga9709d62016-06-23 02:04:31 -04001425
Jason Wanga9709d62016-06-23 02:04:31 -04001426 for (region = newmem->regions;
1427 region < newmem->regions + mem.nregions;
1428 region++) {
Jason Wang0bbe3062020-03-26 22:01:19 +08001429 if (vhost_iotlb_add_range(newumem,
1430 region->guest_phys_addr,
1431 region->guest_phys_addr +
1432 region->memory_size - 1,
1433 region->userspace_addr,
1434 VHOST_MAP_RW))
Jason Wanga9709d62016-06-23 02:04:31 -04001435 goto err;
Jason Wanga9709d62016-06-23 02:04:31 -04001436 }
1437
1438 if (!memory_access_ok(d, newumem, 0))
1439 goto err;
1440
1441 oldumem = d->umem;
1442 d->umem = newumem;
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001443
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001444 /* All memory accesses are done under some VQ mutex. */
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001445 for (i = 0; i < d->nvqs; ++i) {
1446 mutex_lock(&d->vqs[i]->mutex);
Jason Wanga9709d62016-06-23 02:04:31 -04001447 d->vqs[i]->umem = newumem;
Michael S. Tsirkin98f9ca02014-05-28 17:07:02 +03001448 mutex_unlock(&d->vqs[i]->mutex);
1449 }
Jason Wanga9709d62016-06-23 02:04:31 -04001450
1451 kvfree(newmem);
Jason Wang0bbe3062020-03-26 22:01:19 +08001452 vhost_iotlb_free(oldumem);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001453 return 0;
Jason Wanga9709d62016-06-23 02:04:31 -04001454
1455err:
Jason Wang0bbe3062020-03-26 22:01:19 +08001456 vhost_iotlb_free(newumem);
Jason Wanga9709d62016-06-23 02:04:31 -04001457 kvfree(newmem);
1458 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001459}
1460
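/* An illustrative userspace sketch for the ioctl above (single region;
 * uapi names from <linux/vhost.h>; gpa, size and hva assumed known):
 *
 *	struct vhost_memory *mem;
 *
 *	mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
 *	mem->nregions = 1;
 *	mem->regions[0] = (struct vhost_memory_region) {
 *		.guest_phys_addr = gpa,
 *		.memory_size	 = size,
 *		.userspace_addr	 = (uint64_t)(uintptr_t)hva,
 *	};
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 *	free(mem);
 */
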
Jason Wangfeebcae2019-05-24 04:12:17 -04001461static long vhost_vring_set_num(struct vhost_dev *d,
1462 struct vhost_virtqueue *vq,
1463 void __user *argp)
1464{
1465 struct vhost_vring_state s;
1466
1467 /* Resizing ring with an active backend?
1468 * You don't want to do that. */
1469 if (vq->private_data)
1470 return -EBUSY;
1471
1472 if (copy_from_user(&s, argp, sizeof s))
1473 return -EFAULT;
1474
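	/* The ring size must fit in 16 bits and be a power of two:
	 * the (num - 1) index masks used elsewhere rely on that. */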
1475 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1476 return -EINVAL;
1477 vq->num = s.num;
1478
1479 return 0;
1480}
1481
1482static long vhost_vring_set_addr(struct vhost_dev *d,
1483 struct vhost_virtqueue *vq,
1484 void __user *argp)
1485{
1486 struct vhost_vring_addr a;
1487
1488 if (copy_from_user(&a, argp, sizeof a))
1489 return -EFAULT;
1490 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1491 return -EOPNOTSUPP;
1492
1493	/* For 32-bit, verify that the top 32 bits of the user
1494 data are set to zero. */
1495 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1496 (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1497 (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1498 return -EFAULT;
1499
1500 /* Make sure it's safe to cast pointers to vring types. */
1501 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1502 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1503 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1504 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1505 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1506 return -EINVAL;
1507
1508 /* We only verify access here if backend is configured.
1509	 * If it is not, we skip the check, as the size might not have been set up yet.
1510 * We will verify when backend is configured. */
1511 if (vq->private_data) {
1512 if (!vq_access_ok(vq, vq->num,
1513 (void __user *)(unsigned long)a.desc_user_addr,
1514 (void __user *)(unsigned long)a.avail_user_addr,
1515 (void __user *)(unsigned long)a.used_user_addr))
1516 return -EINVAL;
1517
1518 /* Also validate log access for used ring if enabled. */
1519 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1520 !log_access_ok(vq->log_base, a.log_guest_addr,
1521 sizeof *vq->used +
1522 vq->num * sizeof *vq->used->ring))
1523 return -EINVAL;
1524 }
1525
1526 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1527 vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1528 vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1529 vq->log_addr = a.log_guest_addr;
1530 vq->used = (void __user *)(unsigned long)a.used_user_addr;
1531
1532 return 0;
1533}
1534
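/* An illustrative userspace sketch for the ioctl above (minimal split-ring
 * setup; desc/avail/used are assumed to be suitably aligned mappings):
 *
 *	struct vhost_vring_addr a = {
 *		.index		 = 0,
 *		.desc_user_addr	 = (uint64_t)(uintptr_t)desc,
 *		.avail_user_addr = (uint64_t)(uintptr_t)avail,
 *		.used_user_addr	 = (uint64_t)(uintptr_t)used,
 *	};
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &a);
 */
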
1535static long vhost_vring_set_num_addr(struct vhost_dev *d,
1536 struct vhost_virtqueue *vq,
1537 unsigned int ioctl,
1538 void __user *argp)
1539{
1540 long r;
1541
1542 mutex_lock(&vq->mutex);
1543
1544 switch (ioctl) {
1545 case VHOST_SET_VRING_NUM:
1546 r = vhost_vring_set_num(d, vq, argp);
1547 break;
1548 case VHOST_SET_VRING_ADDR:
1549 r = vhost_vring_set_addr(d, vq, argp);
1550 break;
1551 default:
1552 BUG();
1553 }
1554
1555 mutex_unlock(&vq->mutex);
1556
1557 return r;
1558}
Sonny Rao26b36602018-03-14 10:05:06 -07001559long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001560{
Al Virocecb46f2012-08-27 14:21:39 -04001561 struct file *eventfp, *filep = NULL;
1562 bool pollstart = false, pollstop = false;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001563 struct eventfd_ctx *ctx = NULL;
1564 u32 __user *idxp = argp;
1565 struct vhost_virtqueue *vq;
1566 struct vhost_vring_state s;
1567 struct vhost_vring_file f;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001568 u32 idx;
1569 long r;
1570
1571 r = get_user(idx, idxp);
1572 if (r < 0)
1573 return r;
Krishna Kumar0f3d9a12010-05-25 11:10:36 +05301574 if (idx >= d->nvqs)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001575 return -ENOBUFS;
1576
Jason Wangff002262018-10-30 14:10:49 +08001577 idx = array_index_nospec(idx, d->nvqs);
Asias He3ab2e422013-04-27 11:16:48 +08001578 vq = d->vqs[idx];
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001579
Jason Wangfeebcae2019-05-24 04:12:17 -04001580 if (ioctl == VHOST_SET_VRING_NUM ||
1581 ioctl == VHOST_SET_VRING_ADDR) {
1582 return vhost_vring_set_num_addr(d, vq, ioctl, argp);
1583 }
1584
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001585 mutex_lock(&vq->mutex);
1586
1587 switch (ioctl) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001588 case VHOST_SET_VRING_BASE:
1589 /* Moving base with an active backend?
1590 * You don't want to do that. */
1591 if (vq->private_data) {
1592 r = -EBUSY;
1593 break;
1594 }
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001595 if (copy_from_user(&s, argp, sizeof s)) {
1596 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001597 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001598 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001599 if (s.num > 0xffff) {
1600 r = -EINVAL;
1601 break;
1602 }
Jason Wang8d658432017-07-27 11:22:05 +08001603 vq->last_avail_idx = s.num;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001604 /* Forget the cached index value. */
1605 vq->avail_idx = vq->last_avail_idx;
1606 break;
1607 case VHOST_GET_VRING_BASE:
1608 s.index = idx;
1609 s.num = vq->last_avail_idx;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001610 if (copy_to_user(argp, &s, sizeof s))
1611 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001612 break;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001613 case VHOST_SET_VRING_KICK:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001614 if (copy_from_user(&f, argp, sizeof f)) {
1615 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001616 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001617 }
Zhu Lingshane0136c12020-06-05 18:27:14 +08001618 eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001619 if (IS_ERR(eventfp)) {
1620 r = PTR_ERR(eventfp);
1621 break;
1622 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001623 if (eventfp != vq->kick) {
Al Virocecb46f2012-08-27 14:21:39 -04001624 pollstop = (filep = vq->kick) != NULL;
1625 pollstart = (vq->kick = eventfp) != NULL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001626 } else
1627 filep = eventfp;
1628 break;
1629 case VHOST_SET_VRING_CALL:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001630 if (copy_from_user(&f, argp, sizeof f)) {
1631 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001632 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001633 }
Zhu Lingshane0136c12020-06-05 18:27:14 +08001634 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
Eric Biggerse050c7d2018-01-06 14:52:19 -08001635 if (IS_ERR(ctx)) {
1636 r = PTR_ERR(ctx);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001637 break;
1638 }
Zhu Lingshan265a0ad2020-07-31 14:55:28 +08001639
1640 spin_lock(&vq->call_ctx.ctx_lock);
1641 swap(ctx, vq->call_ctx.ctx);
1642 spin_unlock(&vq->call_ctx.ctx_lock);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001643 break;
1644 case VHOST_SET_VRING_ERR:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001645 if (copy_from_user(&f, argp, sizeof f)) {
1646 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001647 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001648 }
Zhu Lingshane0136c12020-06-05 18:27:14 +08001649 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
Eric Biggers09f332a2018-01-06 14:52:20 -08001650 if (IS_ERR(ctx)) {
1651 r = PTR_ERR(ctx);
Michael S. Tsirkin535297a2010-03-17 16:06:11 +02001652 break;
1653 }
Eric Biggers09f332a2018-01-06 14:52:20 -08001654 swap(ctx, vq->error_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001655 break;
Greg Kurz2751c982015-04-24 14:27:24 +02001656 case VHOST_SET_VRING_ENDIAN:
1657 r = vhost_set_vring_endian(vq, argp);
1658 break;
1659 case VHOST_GET_VRING_ENDIAN:
1660 r = vhost_get_vring_endian(vq, idx, argp);
1661 break;
Jason Wang03088132016-03-04 06:24:53 -05001662 case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1663 if (copy_from_user(&s, argp, sizeof(s))) {
1664 r = -EFAULT;
1665 break;
1666 }
1667 vq->busyloop_timeout = s.num;
1668 break;
1669 case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1670 s.index = idx;
1671 s.num = vq->busyloop_timeout;
1672 if (copy_to_user(argp, &s, sizeof(s)))
1673 r = -EFAULT;
1674 break;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001675 default:
1676 r = -ENOIOCTLCMD;
1677 }
1678
1679 if (pollstop && vq->handle_kick)
1680 vhost_poll_stop(&vq->poll);
1681
Eric Biggerse050c7d2018-01-06 14:52:19 -08001682 if (!IS_ERR_OR_NULL(ctx))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001683 eventfd_ctx_put(ctx);
1684 if (filep)
1685 fput(filep);
1686
1687 if (pollstart && vq->handle_kick)
Jason Wang2b8b3282013-01-28 01:05:18 +00001688 r = vhost_poll_start(&vq->poll, vq->kick);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001689
1690 mutex_unlock(&vq->mutex);
1691
1692 if (pollstop && vq->handle_kick)
1693 vhost_poll_flush(&vq->poll);
1694 return r;
1695}
Asias He6ac1afb2013-05-06 16:38:21 +08001696EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001697
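/* An illustrative userspace sketch: wiring the per-vq eventfds handled
 * above. Passing fd == VHOST_FILE_UNBIND (-1) detaches the current one.
 *
 *	struct vhost_vring_file f = { .index = 0, .fd = kick_efd };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &f);	// guest -> host kick
 *	f.fd = call_efd;
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &f);	// host -> guest interrupt
 */
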
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001698int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1699{
Jason Wang0bbe3062020-03-26 22:01:19 +08001700 struct vhost_iotlb *niotlb, *oiotlb;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001701 int i;
1702
Jason Wang0bbe3062020-03-26 22:01:19 +08001703 niotlb = iotlb_alloc();
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001704 if (!niotlb)
1705 return -ENOMEM;
1706
1707 oiotlb = d->iotlb;
1708 d->iotlb = niotlb;
1709
1710 for (i = 0; i < d->nvqs; ++i) {
Jason Wangb13f9c62018-08-08 11:43:04 +08001711 struct vhost_virtqueue *vq = d->vqs[i];
1712
1713 mutex_lock(&vq->mutex);
1714 vq->iotlb = niotlb;
1715 __vhost_vq_meta_reset(vq);
1716 mutex_unlock(&vq->mutex);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001717 }
1718
Jason Wang0bbe3062020-03-26 22:01:19 +08001719 vhost_iotlb_free(oiotlb);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001720
1721 return 0;
1722}
1723EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1724
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001725/* Caller must have device mutex */
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001726long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001727{
Eric Biggersd25cc432018-01-06 14:52:21 -08001728 struct eventfd_ctx *ctx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001729 u64 p;
1730 long r;
1731 int i, fd;
1732
1733 /* If you are not the owner, you can become one */
1734 if (ioctl == VHOST_SET_OWNER) {
1735 r = vhost_dev_set_owner(d);
1736 goto done;
1737 }
1738
1739 /* You must be the owner to do anything else */
1740 r = vhost_dev_check_owner(d);
1741 if (r)
1742 goto done;
1743
1744 switch (ioctl) {
1745 case VHOST_SET_MEM_TABLE:
1746 r = vhost_set_memory(d, argp);
1747 break;
1748 case VHOST_SET_LOG_BASE:
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001749 if (copy_from_user(&p, argp, sizeof p)) {
1750 r = -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001751 break;
Takuya Yoshikawa7ad9c9d2010-05-27 18:58:03 +09001752 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001753 if ((u64)(unsigned long)p != p) {
1754 r = -EFAULT;
1755 break;
1756 }
1757 for (i = 0; i < d->nvqs; ++i) {
1758 struct vhost_virtqueue *vq;
1759 void __user *base = (void __user *)(unsigned long)p;
Asias He3ab2e422013-04-27 11:16:48 +08001760 vq = d->vqs[i];
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001761 mutex_lock(&vq->mutex);
1762	/* If the ring is inactive, we will check when it's enabled. */
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001763 if (vq->private_data && !vq_log_access_ok(vq, base))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001764 r = -EFAULT;
1765 else
1766 vq->log_base = base;
1767 mutex_unlock(&vq->mutex);
1768 }
1769 break;
1770 case VHOST_SET_LOG_FD:
1771 r = get_user(fd, (int __user *)argp);
1772 if (r < 0)
1773 break;
Zhu Lingshane0136c12020-06-05 18:27:14 +08001774 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
Eric Biggersd25cc432018-01-06 14:52:21 -08001775 if (IS_ERR(ctx)) {
1776 r = PTR_ERR(ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001777 break;
1778 }
Eric Biggersd25cc432018-01-06 14:52:21 -08001779 swap(ctx, d->log_ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001780 for (i = 0; i < d->nvqs; ++i) {
Asias He3ab2e422013-04-27 11:16:48 +08001781 mutex_lock(&d->vqs[i]->mutex);
1782 d->vqs[i]->log_ctx = d->log_ctx;
1783 mutex_unlock(&d->vqs[i]->mutex);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001784 }
1785 if (ctx)
1786 eventfd_ctx_put(ctx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001787 break;
1788 default:
Michael S. Tsirkin935cdee2012-12-06 14:03:34 +02001789 r = -ENOIOCTLCMD;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001790 break;
1791 }
1792done:
1793 return r;
1794}
Asias He6ac1afb2013-05-06 16:38:21 +08001795EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001796
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001797/* TODO: This is really inefficient. We need something like get_user()
1798 * (instruction directly accesses the data, with an exception table entry
Mauro Carvalho Chehabcb1aaeb2019-06-07 15:54:32 -03001799 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001800 */
1801static int set_bit_to_user(int nr, void __user *addr)
1802{
1803 unsigned long log = (unsigned long)addr;
1804 struct page *page;
1805 void *base;
1806 int bit = nr + (log % PAGE_SIZE) * 8;
1807 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301808
John Hubbard690623e2020-06-07 21:41:15 -07001809 r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
Michael S. Tsirkind6db3f52010-02-23 11:25:23 +02001810 if (r < 0)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001811 return r;
Michael S. Tsirkind6db3f52010-02-23 11:25:23 +02001812 BUG_ON(r != 1);
Cong Wangc6daa7f2011-11-25 23:14:26 +08001813 base = kmap_atomic(page);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001814 set_bit(bit, base);
Cong Wangc6daa7f2011-11-25 23:14:26 +08001815 kunmap_atomic(base);
John Hubbard690623e2020-06-07 21:41:15 -07001816 unpin_user_pages_dirty_lock(&page, 1, true);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001817 return 0;
1818}
1819
1820static int log_write(void __user *log_base,
1821 u64 write_address, u64 write_length)
1822{
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001823 u64 write_page = write_address / VHOST_PAGE_SIZE;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001824 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301825
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001826 if (!write_length)
1827 return 0;
Michael S. Tsirkin3bf9be42010-11-29 10:19:07 +02001828 write_length += write_address % VHOST_PAGE_SIZE;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001829 for (;;) {
1830 u64 base = (u64)(unsigned long)log_base;
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001831 u64 log = base + write_page / 8;
1832 int bit = write_page % 8;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001833 if ((u64)(unsigned long)log != log)
1834 return -EFAULT;
1835 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1836 if (r < 0)
1837 return r;
1838 if (write_length <= VHOST_PAGE_SIZE)
1839 break;
1840 write_length -= VHOST_PAGE_SIZE;
Michael S. Tsirkin28831ee2010-11-29 10:22:10 +02001841 write_page += 1;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001842 }
1843 return r;
1844}
1845
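/* The log above is a plain bitmap in userspace memory: bit N marks guest
 * page N (VHOST_PAGE_SIZE bytes) dirty, so a write of length L at address
 * A sets bits A / VHOST_PAGE_SIZE through (A + L - 1) / VHOST_PAGE_SIZE.
 */
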
Jason Wangcc5e7102019-01-16 16:54:42 +08001846static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1847{
Jason Wang0bbe3062020-03-26 22:01:19 +08001848 struct vhost_iotlb *umem = vq->umem;
1849 struct vhost_iotlb_map *u;
Jason Wangcc5e7102019-01-16 16:54:42 +08001850 u64 start, end, l, min;
1851 int r;
1852 bool hit = false;
1853
1854 while (len) {
1855 min = len;
1856	/* More than one GPA can be mapped into a single HVA, so
1857	 * iterate over all possible umems here to be safe.
1858 */
Jason Wang0bbe3062020-03-26 22:01:19 +08001859 list_for_each_entry(u, &umem->list, link) {
1860 if (u->addr > hva - 1 + len ||
1861 u->addr - 1 + u->size < hva)
Jason Wangcc5e7102019-01-16 16:54:42 +08001862 continue;
Jason Wang0bbe3062020-03-26 22:01:19 +08001863 start = max(u->addr, hva);
1864 end = min(u->addr - 1 + u->size, hva - 1 + len);
Jason Wangcc5e7102019-01-16 16:54:42 +08001865 l = end - start + 1;
1866 r = log_write(vq->log_base,
Jason Wang0bbe3062020-03-26 22:01:19 +08001867 u->start + start - u->addr,
Jason Wangcc5e7102019-01-16 16:54:42 +08001868 l);
1869 if (r < 0)
1870 return r;
1871 hit = true;
1872 min = min(l, min);
1873 }
1874
1875 if (!hit)
1876 return -EFAULT;
1877
1878 len -= min;
1879 hva += min;
1880 }
1881
1882 return 0;
1883}
1884
1885static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1886{
1887 struct iovec iov[64];
1888 int i, ret;
1889
1890 if (!vq->iotlb)
1891 return log_write(vq->log_base, vq->log_addr + used_offset, len);
1892
1893 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1894 len, iov, 64, VHOST_ACCESS_WO);
Jason Wang816db762019-02-19 14:53:44 +08001895 if (ret < 0)
Jason Wangcc5e7102019-01-16 16:54:42 +08001896 return ret;
1897
1898 for (i = 0; i < ret; i++) {
1899 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1900 iov[i].iov_len);
1901 if (ret)
1902 return ret;
1903 }
1904
1905 return 0;
1906}
1907
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001908int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
Jason Wangcc5e7102019-01-16 16:54:42 +08001909 unsigned int log_num, u64 len, struct iovec *iov, int count)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001910{
1911 int i, r;
1912
1913 /* Make sure data written is seen before log. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00001914 smp_wmb();
Jason Wangcc5e7102019-01-16 16:54:42 +08001915
1916 if (vq->iotlb) {
1917 for (i = 0; i < count; i++) {
1918 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1919 iov[i].iov_len);
1920 if (r < 0)
1921 return r;
1922 }
1923 return 0;
1924 }
1925
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001926 for (i = 0; i < log_num; ++i) {
1927 u64 l = min(log[i].len, len);
1928 r = log_write(vq->log_base, log[i].addr, l);
1929 if (r < 0)
1930 return r;
1931 len -= l;
Michael S. Tsirkin5786aee2010-09-22 12:31:53 +02001932 if (!len) {
1933 if (vq->log_ctx)
1934 eventfd_signal(vq->log_ctx, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001935 return 0;
Michael S. Tsirkin5786aee2010-09-22 12:31:53 +02001936 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001937 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001938 /* Length written exceeds what we have stored. This is a bug. */
1939 BUG();
1940 return 0;
1941}
Asias He6ac1afb2013-05-06 16:38:21 +08001942EXPORT_SYMBOL_GPL(vhost_log_write);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001943
Jason Wang2723fea2011-06-21 18:04:38 +08001944static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1945{
1946 void __user *used;
Jason Wang7b5d7532019-05-24 04:12:14 -04001947 if (vhost_put_used_flags(vq))
Jason Wang2723fea2011-06-21 18:04:38 +08001948 return -EFAULT;
1949 if (unlikely(vq->log_used)) {
1950 /* Make sure the flag is seen before log. */
1951 smp_wmb();
1952 /* Log used flag write. */
1953 used = &vq->used->flags;
Jason Wangcc5e7102019-01-16 16:54:42 +08001954 log_used(vq, (used - (void __user *)vq->used),
1955 sizeof vq->used->flags);
Jason Wang2723fea2011-06-21 18:04:38 +08001956 if (vq->log_ctx)
1957 eventfd_signal(vq->log_ctx, 1);
1958 }
1959 return 0;
1960}
1961
1962static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1963{
Jason Wang7b5d7532019-05-24 04:12:14 -04001964 if (vhost_put_avail_event(vq))
Jason Wang2723fea2011-06-21 18:04:38 +08001965 return -EFAULT;
1966 if (unlikely(vq->log_used)) {
1967 void __user *used;
1968 /* Make sure the event is seen before log. */
1969 smp_wmb();
1970 /* Log avail event write */
1971 used = vhost_avail_event(vq);
Jason Wangcc5e7102019-01-16 16:54:42 +08001972 log_used(vq, (used - (void __user *)vq->used),
1973 sizeof *vhost_avail_event(vq));
Jason Wang2723fea2011-06-21 18:04:38 +08001974 if (vq->log_ctx)
1975 eventfd_signal(vq->log_ctx, 1);
1976 }
1977 return 0;
1978}
1979
Greg Kurz80f7d032016-02-16 15:59:44 +01001980int vhost_vq_init_access(struct vhost_virtqueue *vq)
Jason Wang2723fea2011-06-21 18:04:38 +08001981{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03001982 __virtio16 last_used_idx;
Jason Wang2723fea2011-06-21 18:04:38 +08001983 int r;
Greg Kurze1f33be2016-02-16 15:54:28 +01001984 bool is_le = vq->is_le;
1985
Halil Pasiccda8bba2017-01-30 11:09:36 +01001986 if (!vq->private_data)
Jason Wang2723fea2011-06-21 18:04:38 +08001987 return 0;
Greg Kurz2751c982015-04-24 14:27:24 +02001988
1989 vhost_init_is_le(vq);
Jason Wang2723fea2011-06-21 18:04:38 +08001990
1991 r = vhost_update_used_flags(vq);
1992 if (r)
Greg Kurze1f33be2016-02-16 15:54:28 +01001993 goto err;
Jason Wang2723fea2011-06-21 18:04:38 +08001994 vq->signalled_used_valid = false;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001995 if (!vq->iotlb &&
Linus Torvalds96d4f262019-01-03 18:57:57 -08001996 !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
Greg Kurze1f33be2016-02-16 15:54:28 +01001997 r = -EFAULT;
1998 goto err;
1999 }
Jason Wang7b5d7532019-05-24 04:12:14 -04002000 r = vhost_get_used_idx(vq, &last_used_idx);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002001 if (r) {
2002 vq_err(vq, "Can't access used idx at %p\n",
2003 &vq->used->idx);
Greg Kurze1f33be2016-02-16 15:54:28 +01002004 goto err;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002005 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002006 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
Michael S. Tsirkin64f7f052014-12-01 17:39:39 +02002007 return 0;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002008
Greg Kurze1f33be2016-02-16 15:54:28 +01002009err:
2010 vq->is_le = is_le;
2011 return r;
Jason Wang2723fea2011-06-21 18:04:38 +08002012}
Greg Kurz80f7d032016-02-16 15:59:44 +01002013EXPORT_SYMBOL_GPL(vhost_vq_init_access);
Jason Wang2723fea2011-06-21 18:04:38 +08002014
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002015static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002016 struct iovec iov[], int iov_size, int access)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002017{
Jason Wang0bbe3062020-03-26 22:01:19 +08002018 const struct vhost_iotlb_map *map;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002019 struct vhost_dev *dev = vq->dev;
Jason Wang0bbe3062020-03-26 22:01:19 +08002020 struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002021 struct iovec *_iov;
2022 u64 s = 0;
2023 int ret = 0;
2024
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002025 while ((u64)len > s) {
2026 u64 size;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002027 if (unlikely(ret >= iov_size)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002028 ret = -ENOBUFS;
2029 break;
2030 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002031
Jason Wang0bbe3062020-03-26 22:01:19 +08002032 map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
2033 if (map == NULL || map->start > addr) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002034 if (umem != dev->iotlb) {
2035 ret = -EFAULT;
2036 break;
2037 }
2038 ret = -EAGAIN;
2039 break;
Jason Wang0bbe3062020-03-26 22:01:19 +08002040 } else if (!(map->perm & access)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002041 ret = -EPERM;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002042 break;
2043 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002044
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002045 _iov = iov + ret;
Jason Wang0bbe3062020-03-26 22:01:19 +08002046 size = map->size - addr + map->start;
Michael S. Tsirkinbd971202012-11-26 05:57:27 +00002047 _iov->iov_len = min((u64)len - s, size);
Michael S. Tsirkin0d4a3f22019-09-14 15:21:51 -04002048 _iov->iov_base = (void __user *)(unsigned long)
Jason Wang0bbe3062020-03-26 22:01:19 +08002049 (map->addr + addr - map->start);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002050 s += size;
2051 addr += size;
2052 ++ret;
2053 }
2054
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002055 if (ret == -EAGAIN)
2056 vhost_iotlb_miss(vq, addr, access);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002057 return ret;
2058}
2059
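/* Note that translate_desc() may split one guest range across several
 * iovec entries, one per IOTLB/umem mapping it crosses; the return value
 * is the number of entries filled, and -EAGAIN means an IOTLB miss has
 * been queued for userspace to service.
 */
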
2060/* Each buffer in the virtqueues is actually a chain of descriptors. This
2061 * function returns the next descriptor in the chain,
2062 * or -1U if we're at the end. */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002063static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002064{
2065 unsigned int next;
2066
2067 /* If this descriptor says it doesn't chain, we're done. */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002068 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002069 return -1U;
2070
2071	/* Check they're not leading us off the end of descriptors. */
Paul E. McKenney3a5db0b2017-11-27 09:45:10 -08002072 next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002073 return next;
2074}
2075
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002076static int get_indirect(struct vhost_virtqueue *vq,
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002077 struct iovec iov[], unsigned int iov_size,
2078 unsigned int *out_num, unsigned int *in_num,
2079 struct vhost_log *log, unsigned int *log_num,
2080 struct vring_desc *indirect)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002081{
2082 struct vring_desc desc;
2083 unsigned int i = 0, count, found = 0;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002084 u32 len = vhost32_to_cpu(vq, indirect->len);
Al Viroaad9a1c2014-12-10 14:49:01 -05002085 struct iov_iter from;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002086 int ret, access;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002087
2088 /* Sanity check */
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002089 if (unlikely(len % sizeof desc)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002090 vq_err(vq, "Invalid length in indirect descriptor: "
2091 "len 0x%llx not multiple of 0x%zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002092 (unsigned long long)len,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002093 sizeof desc);
2094 return -EINVAL;
2095 }
2096
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002097 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002098 UIO_MAXIOV, VHOST_ACCESS_RO);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002099 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002100 if (ret != -EAGAIN)
2101 vq_err(vq, "Translation failure %d in indirect.\n", ret);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002102 return ret;
2103 }
Al Viroaad9a1c2014-12-10 14:49:01 -05002104 iov_iter_init(&from, READ, vq->indirect, ret, len);
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002105 count = len / sizeof desc;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002106	/* Buffers are chained via a 16-bit next field, so
2107 * we can have at most 2^16 of these. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002108 if (unlikely(count > USHRT_MAX + 1)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002109 vq_err(vq, "Indirect buffer length too big: %d\n",
2110 indirect->len);
2111 return -E2BIG;
2112 }
2113
2114 do {
2115 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002116 if (unlikely(++found > count)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002117 vq_err(vq, "Loop detected: last one at %u "
2118 "indirect size %u\n",
2119 i, count);
2120 return -EINVAL;
2121 }
Al Virocbbd26b2016-11-01 22:09:04 -04002122 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002123 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002124 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002125 return -EINVAL;
2126 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002127 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002128 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002129 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002130 return -EINVAL;
2131 }
2132
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002133 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2134 access = VHOST_ACCESS_WO;
2135 else
2136 access = VHOST_ACCESS_RO;
2137
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002138 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2139 vhost32_to_cpu(vq, desc.len), iov + iov_count,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002140 iov_size - iov_count, access);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002141 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002142 if (ret != -EAGAIN)
2143 vq_err(vq, "Translation failure %d indirect idx %d\n",
2144 ret, i);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002145 return ret;
2146 }
2147 /* If this is an input descriptor, increment that count. */
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002148 if (access == VHOST_ACCESS_WO) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002149 *in_num += ret;
yongduan060423b2019-09-11 17:44:24 +08002150 if (unlikely(log && ret)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002151 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2152 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002153 ++*log_num;
2154 }
2155 } else {
2156 /* If it's an output descriptor, they're all supposed
2157 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002158 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002159 vq_err(vq, "Indirect descriptor "
2160 "has out after in: idx %d\n", i);
2161 return -EINVAL;
2162 }
2163 *out_num += ret;
2164 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002165 } while ((i = next_desc(vq, &desc)) != -1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002166 return 0;
2167}
2168
2169/* This looks in the virtqueue for the first available buffer, and converts
2170 * it to an iovec for convenient access. Since descriptors consist of some
2171 * number of output then some number of input descriptors, it's actually two
2172 * iovecs, but we pack them into one and note how many of each there were.
2173 *
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002174 * This function returns the descriptor number found, or vq->num (which is
2175 * never a valid descriptor number) if none was found. A negative code is
2176 * returned on error. */
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002177int vhost_get_vq_desc(struct vhost_virtqueue *vq,
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002178 struct iovec iov[], unsigned int iov_size,
2179 unsigned int *out_num, unsigned int *in_num,
2180 struct vhost_log *log, unsigned int *log_num)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002181{
2182 struct vring_desc desc;
2183 unsigned int i, head, found = 0;
2184 u16 last_avail_idx;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002185 __virtio16 avail_idx;
2186 __virtio16 ring_head;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002187 int ret, access;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002188
2189 /* Check it isn't doing very strange things with descriptor numbers. */
2190 last_avail_idx = vq->last_avail_idx;
Jason Wange3b56cd2017-02-07 15:49:50 +08002191
2192 if (vq->avail_idx == vq->last_avail_idx) {
Jason Wang7b5d7532019-05-24 04:12:14 -04002193 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
Jason Wange3b56cd2017-02-07 15:49:50 +08002194 vq_err(vq, "Failed to access avail idx at %p\n",
2195 &vq->avail->idx);
2196 return -EFAULT;
2197 }
2198 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2199
2200 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2201 vq_err(vq, "Guest moved used index from %u to %u",
2202 last_avail_idx, vq->avail_idx);
2203 return -EFAULT;
2204 }
2205
2206 /* If there's nothing new since last we looked, return
2207 * invalid.
2208 */
2209 if (vq->avail_idx == last_avail_idx)
2210 return vq->num;
2211
2212 /* Only get avail ring entries after they have been
2213 * exposed by guest.
2214 */
2215 smp_rmb();
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002216 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002217
2218 /* Grab the next descriptor number they're advertising, and increment
2219 * the index we've seen. */
Jason Wang7b5d7532019-05-24 04:12:14 -04002220 if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002221 vq_err(vq, "Failed to read head: idx %d address %p\n",
2222 last_avail_idx,
2223 &vq->avail->ring[last_avail_idx % vq->num]);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002224 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002225 }
2226
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002227 head = vhost16_to_cpu(vq, ring_head);
2228
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002229 /* If their number is silly, that's an error. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002230 if (unlikely(head >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002231 vq_err(vq, "Guest says index %u > %u is available",
2232 head, vq->num);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002233 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002234 }
2235
2236	/* When we start there are neither input nor output descriptors. */
2237 *out_num = *in_num = 0;
2238 if (unlikely(log))
2239 *log_num = 0;
2240
2241 i = head;
2242 do {
2243 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002244 if (unlikely(i >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002245 vq_err(vq, "Desc index is %u > %u, head = %u",
2246 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002247 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002248 }
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002249 if (unlikely(++found > vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002250 vq_err(vq, "Loop detected: last one at %u "
2251 "vq size %u head %u\n",
2252 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002253 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002254 }
Jason Wang7b5d7532019-05-24 04:12:14 -04002255 ret = vhost_get_desc(vq, &desc, i);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002256 if (unlikely(ret)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002257 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2258 i, vq->desc + i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002259 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002260 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002261 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03002262 ret = get_indirect(vq, iov, iov_size,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002263 out_num, in_num,
2264 log, log_num, &desc);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002265 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002266 if (ret != -EAGAIN)
2267 vq_err(vq, "Failure detected "
2268 "in indirect descriptor at idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002269 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002270 }
2271 continue;
2272 }
2273
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002274 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2275 access = VHOST_ACCESS_WO;
2276 else
2277 access = VHOST_ACCESS_RO;
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002278 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2279 vhost32_to_cpu(vq, desc.len), iov + iov_count,
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002280 iov_size - iov_count, access);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002281 if (unlikely(ret < 0)) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002282 if (ret != -EAGAIN)
2283 vq_err(vq, "Translation failure %d descriptor idx %d\n",
2284 ret, i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002285 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002286 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002287 if (access == VHOST_ACCESS_WO) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002288 /* If this is an input descriptor,
2289 * increment that count. */
2290 *in_num += ret;
yongduan060423b2019-09-11 17:44:24 +08002291 if (unlikely(log && ret)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002292 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2293 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002294 ++*log_num;
2295 }
2296 } else {
2297 /* If it's an output descriptor, they're all supposed
2298 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03002299 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002300 vq_err(vq, "Descriptor has out after in: "
2301 "idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03002302 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002303 }
2304 *out_num += ret;
2305 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002306 } while ((i = next_desc(vq, &desc)) != -1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002307
2308 /* On success, increment avail index. */
2309 vq->last_avail_idx++;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002310
2311	/* Assume notifications from the guest are disabled at this point;
2312	 * if they aren't, we would need to update the avail_event index. */
2313 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002314 return head;
2315}
Asias He6ac1afb2013-05-06 16:38:21 +08002316EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002317
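/* An illustrative handler-side sketch (not a definitive backend):
 *
 *	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head < 0)
 *		... error, possibly an IOTLB miss in flight ...
 *	else if (head == vq->num)
 *		... ring empty, consider vhost_enable_notify() ...
 *	else {
 *		... consume the out/in iovecs ...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */
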
2318/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
David Stevens8dd014a2010-07-27 18:52:21 +03002319void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002320{
David Stevens8dd014a2010-07-27 18:52:21 +03002321 vq->last_avail_idx -= n;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002322}
Asias He6ac1afb2013-05-06 16:38:21 +08002323EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002324
2325/* After we've used one of their buffers, we tell them about it. We'll then
2326 * want to notify the guest, using eventfd. */
2327int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2328{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002329 struct vring_used_elem heads = {
2330 cpu_to_vhost32(vq, head),
2331 cpu_to_vhost32(vq, len)
2332 };
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002333
Jason Wangc49e4e52013-09-02 16:40:58 +08002334 return vhost_add_used_n(vq, &heads, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002335}
Asias He6ac1afb2013-05-06 16:38:21 +08002336EXPORT_SYMBOL_GPL(vhost_add_used);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002337
David Stevens8dd014a2010-07-27 18:52:21 +03002338static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2339 struct vring_used_elem *heads,
2340 unsigned count)
2341{
Michael S. Tsirkina865e422020-04-06 08:42:55 -04002342 vring_used_elem_t __user *used;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002343 u16 old, new;
David Stevens8dd014a2010-07-27 18:52:21 +03002344 int start;
2345
Michael S. Tsirkin5fba13b2015-11-29 13:34:44 +02002346 start = vq->last_used_idx & (vq->num - 1);
David Stevens8dd014a2010-07-27 18:52:21 +03002347 used = vq->used->ring + start;
Jason Wang7b5d7532019-05-24 04:12:14 -04002348 if (vhost_put_used(vq, heads, start, count)) {
David Stevens8dd014a2010-07-27 18:52:21 +03002349 vq_err(vq, "Failed to write used");
2350 return -EFAULT;
2351 }
2352 if (unlikely(vq->log_used)) {
2353 /* Make sure data is seen before log. */
2354 smp_wmb();
2355 /* Log used ring entry write. */
Jason Wangcc5e7102019-01-16 16:54:42 +08002356 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2357 count * sizeof *used);
David Stevens8dd014a2010-07-27 18:52:21 +03002358 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002359 old = vq->last_used_idx;
2360 new = (vq->last_used_idx += count);
2361	/* If the driver never bothers to signal for a very long while,
2362	 * the used index might wrap around. If that happens, invalidate
2363	 * the signalled_used index we stored. TODO: make sure the driver
2364 * signals at least once in 2^16 and remove this. */
2365 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2366 vq->signalled_used_valid = false;
David Stevens8dd014a2010-07-27 18:52:21 +03002367 return 0;
2368}
2369
2370/* After we've used one of their buffers, we tell them about it. We'll then
2371 * want to notify the guest, using eventfd. */
2372int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2373 unsigned count)
2374{
2375 int start, n, r;
2376
Michael S. Tsirkin5fba13b2015-11-29 13:34:44 +02002377 start = vq->last_used_idx & (vq->num - 1);
David Stevens8dd014a2010-07-27 18:52:21 +03002378 n = vq->num - start;
2379 if (n < count) {
2380 r = __vhost_add_used_n(vq, heads, n);
2381 if (r < 0)
2382 return r;
2383 heads += n;
2384 count -= n;
2385 }
2386 r = __vhost_add_used_n(vq, heads, count);
2387
2388 /* Make sure buffer is written before we update index. */
2389 smp_wmb();
Jason Wang7b5d7532019-05-24 04:12:14 -04002390 if (vhost_put_used_idx(vq)) {
David Stevens8dd014a2010-07-27 18:52:21 +03002391 vq_err(vq, "Failed to increment used idx");
2392 return -EFAULT;
2393 }
2394 if (unlikely(vq->log_used)) {
Jason Wang841df922018-12-13 10:53:37 +08002395 /* Make sure used idx is seen before log. */
2396 smp_wmb();
David Stevens8dd014a2010-07-27 18:52:21 +03002397 /* Log used index update. */
Jason Wangcc5e7102019-01-16 16:54:42 +08002398 log_used(vq, offsetof(struct vring_used, idx),
2399 sizeof vq->used->idx);
David Stevens8dd014a2010-07-27 18:52:21 +03002400 if (vq->log_ctx)
2401 eventfd_signal(vq->log_ctx, 1);
2402 }
2403 return r;
2404}
Asias He6ac1afb2013-05-06 16:38:21 +08002405EXPORT_SYMBOL_GPL(vhost_add_used_n);
David Stevens8dd014a2010-07-27 18:52:21 +03002406
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002407static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002408{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002409 __u16 old, new;
2410 __virtio16 event;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002411 bool v;
Jason Wang8d658432017-07-27 11:22:05 +08002412 /* Flush out used index updates. This is paired
2413 * with the barrier that the Guest executes when enabling
2414 * interrupts. */
2415 smp_mb();
Michael S. Tsirkin0d499352010-05-11 19:44:17 +03002416
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002417 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002418 unlikely(vq->avail_idx == vq->last_avail_idx))
2419 return true;
2420
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002421 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002422 __virtio16 flags;
Jason Wang7b5d7532019-05-24 04:12:14 -04002423 if (vhost_get_avail_flags(vq, &flags)) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002424 vq_err(vq, "Failed to get flags");
2425 return true;
2426 }
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002427 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002428 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002429 old = vq->signalled_used;
2430 v = vq->signalled_used_valid;
2431 new = vq->signalled_used = vq->last_used_idx;
2432 vq->signalled_used_valid = true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002433
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002434 if (unlikely(!v))
2435 return true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002436
Jason Wang7b5d7532019-05-24 04:12:14 -04002437 if (vhost_get_used_event(vq, &event)) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002438 vq_err(vq, "Failed to get used event idx");
2439 return true;
2440 }
Jason Wang8d658432017-07-27 11:22:05 +08002441 return vring_need_event(vhost16_to_cpu(vq, event), new, old);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002442}
2443
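/* With VIRTIO_RING_F_EVENT_IDX, vring_need_event() above implements the
 * spec rule: signal only if the used index crossed the guest's event
 * index since we last signalled, i.e.
 *
 *	(u16)(new - event - 1) < (u16)(new - old)
 */
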
2444/* This actually signals the guest, using eventfd. */
2445void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2446{
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002447	/* Signal the Guest to tell them we used something up. */
Zhu Lingshan265a0ad2020-07-31 14:55:28 +08002448 if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2449 eventfd_signal(vq->call_ctx.ctx, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002450}
Asias He6ac1afb2013-05-06 16:38:21 +08002451EXPORT_SYMBOL_GPL(vhost_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002452
2453/* And here's the combo meal deal. Supersize me! */
2454void vhost_add_used_and_signal(struct vhost_dev *dev,
2455 struct vhost_virtqueue *vq,
2456 unsigned int head, int len)
2457{
2458 vhost_add_used(vq, head, len);
2459 vhost_signal(dev, vq);
2460}
Asias He6ac1afb2013-05-06 16:38:21 +08002461EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002462
David Stevens8dd014a2010-07-27 18:52:21 +03002463/* multi-buffer version of vhost_add_used_and_signal */
2464void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2465 struct vhost_virtqueue *vq,
2466 struct vring_used_elem *heads, unsigned count)
2467{
2468 vhost_add_used_n(vq, heads, count);
2469 vhost_signal(dev, vq);
2470}
Asias He6ac1afb2013-05-06 16:38:21 +08002471EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
David Stevens8dd014a2010-07-27 18:52:21 +03002472
Jason Wangd4a60602016-03-04 06:24:52 -05002473/* Return true if we're sure that the available ring is empty */
2474bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2475{
2476 __virtio16 avail_idx;
2477 int r;
2478
Jason Wang275bf962017-01-18 15:02:01 +08002479 if (vq->avail_idx != vq->last_avail_idx)
Jason Wangd4a60602016-03-04 06:24:52 -05002480 return false;
2481
Jason Wang7b5d7532019-05-24 04:12:14 -04002482 r = vhost_get_avail_idx(vq, &avail_idx);
Jason Wang275bf962017-01-18 15:02:01 +08002483 if (unlikely(r))
2484 return false;
2485 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2486
2487 return vq->avail_idx == vq->last_avail_idx;
Jason Wangd4a60602016-03-04 06:24:52 -05002488}
2489EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2490
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002491/* OK, now we need to know about added descriptors. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002492bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002493{
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002494 __virtio16 avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002495 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05302496
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002497 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2498 return false;
2499 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002500 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08002501 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002502 if (r) {
2503 vq_err(vq, "Failed to enable notification at %p: %d\n",
2504 &vq->used->flags, r);
2505 return false;
2506 }
2507 } else {
Jason Wang2723fea2011-06-21 18:04:38 +08002508 r = vhost_update_avail_event(vq, vq->avail_idx);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002509 if (r) {
2510 vq_err(vq, "Failed to update avail event index at %p: %d\n",
2511 vhost_avail_event(vq), r);
2512 return false;
2513 }
2514 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002515 /* They could have slipped one in as we were doing that: make
2516 * sure it's written, then check again. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00002517 smp_mb();
Jason Wang7b5d7532019-05-24 04:12:14 -04002518 r = vhost_get_avail_idx(vq, &avail_idx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002519 if (r) {
2520 vq_err(vq, "Failed to check avail idx at %p: %d\n",
2521 &vq->avail->idx, r);
2522 return false;
2523 }
2524
Michael S. Tsirkin3b1bbe82014-10-24 14:04:47 +03002525 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002526}
Asias He6ac1afb2013-05-06 16:38:21 +08002527EXPORT_SYMBOL_GPL(vhost_enable_notify);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002528
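/* A typical handler pattern around the notify helpers (a sketch):
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		... drain the ring with vhost_get_vq_desc() ...
 *		if (ring is empty) {
 *			if (!vhost_enable_notify(dev, vq))
 *				break;	// still empty, wait for a kick
 *			vhost_disable_notify(dev, vq); // buffers slipped in
 *		}
 *	}
 */
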
2529/* We don't need to be notified again. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002530void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002531{
2532 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05302533
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002534 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2535 return;
2536 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03002537 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08002538 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03002539 if (r)
2540 vq_err(vq, "Failed to enable notification at %p: %d\n",
2541 &vq->used->flags, r);
2542 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00002543}
Asias He6ac1afb2013-05-06 16:38:21 +08002544EXPORT_SYMBOL_GPL(vhost_disable_notify);
2545
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002546/* Create a new message. */
2547struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2548{
2549 struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2550 if (!node)
2551 return NULL;
Michael S. Tsirkin670ae9c2018-05-12 00:33:10 +03002552
2553 /* Make sure all padding within the structure is initialized. */
2554 memset(&node->msg, 0, sizeof node->msg);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002555 node->vq = vq;
2556 node->msg.type = type;
2557 return node;
2558}
2559EXPORT_SYMBOL_GPL(vhost_new_msg);
2560
2561void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2562 struct vhost_msg_node *node)
2563{
2564 spin_lock(&dev->iotlb_lock);
2565 list_add_tail(&node->node, head);
2566 spin_unlock(&dev->iotlb_lock);
2567
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002568 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002569}
2570EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2571
2572struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2573 struct list_head *head)
2574{
2575 struct vhost_msg_node *node = NULL;
2576
2577 spin_lock(&dev->iotlb_lock);
2578 if (!list_empty(head)) {
2579 node = list_first_entry(head, struct vhost_msg_node,
2580 node);
2581 list_del(&node->node);
2582 }
2583 spin_unlock(&dev->iotlb_lock);
2584
2585 return node;
2586}
2587EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
2588
Jason Wang460f7ce2020-08-04 19:20:38 +03002589void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2590{
2591 struct vhost_virtqueue *vq;
2592 int i;
2593
2594 mutex_lock(&dev->mutex);
2595 for (i = 0; i < dev->nvqs; ++i) {
2596 vq = dev->vqs[i];
2597 mutex_lock(&vq->mutex);
2598 vq->acked_backend_features = features;
2599 mutex_unlock(&vq->mutex);
2600 }
2601 mutex_unlock(&dev->mutex);
2602}
2603EXPORT_SYMBOL_GPL(vhost_set_backend_features);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04002604
Asias He6ac1afb2013-05-06 16:38:21 +08002605static int __init vhost_init(void)
2606{
2607 return 0;
2608}
2609
2610static void __exit vhost_exit(void)
2611{
2612}
2613
2614module_init(vhost_init);
2615module_exit(vhost_exit);
2616
2617MODULE_VERSION("0.0.1");
2618MODULE_LICENSE("GPL v2");
2619MODULE_AUTHOR("Michael S. Tsirkin");
2620MODULE_DESCRIPTION("Host kernel accelerator for virtio");