// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
        "Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
        "Maximum number of iotlb entries. (default: 2048)");

enum {
        VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

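/*
 * With VIRTIO_RING_F_EVENT_IDX, the guest's used_event counter lives
 * right after the avail ring and the host's avail_event right after
 * the used ring, which is why both macros index ring[vq->num].
 */
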
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
        vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
        vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
        vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        struct vhost_vring_state s;

        if (vq->private_data)
                return -EBUSY;

        if (copy_from_user(&s, argp, sizeof(s)))
                return -EFAULT;

        if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
            s.num != VHOST_VRING_BIG_ENDIAN)
                return -EINVAL;

        if (s.num == VHOST_VRING_BIG_ENDIAN)
                vhost_enable_cross_endian_big(vq);
        else
                vhost_enable_cross_endian_little(vq);

        return 0;
}
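
/*
 * Illustrative userspace call (error handling omitted): the VMM picks
 * the legacy ring endianness per vring with VHOST_SET_VRING_ENDIAN
 * before the backend is started:
 *
 *      struct vhost_vring_state s = {
 *              .index = 0,
 *              .num = VHOST_VRING_BIG_ENDIAN,
 *      };
 *      ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s);
 */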

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        struct vhost_vring_state s = {
                .index = idx,
                .num = vq->user_be
        };

        if (copy_to_user(argp, &s, sizeof(s)))
                return -EFAULT;

        return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        /* Note for legacy virtio: user_be is initialized at reset time
         * according to the host endianness. If userspace does not set an
         * explicit endianness, the default behavior is native endian, as
         * expected by legacy virtio.
         */
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
                || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
        vhost_init_is_le(vq);
}

struct vhost_flush_struct {
        struct vhost_work work;
        struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
        struct vhost_flush_struct *s;

        s = container_of(work, struct vhost_flush_struct, work);
        complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
{
        struct vhost_poll *poll;

        poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                             void *key)
{
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
        struct vhost_work *work = &poll->work;

        if (!(key_to_poll(key) & poll->mask))
                return 0;

        if (!poll->dev->use_worker)
                work->fn(work);
        else
                vhost_poll_queue(poll);

        return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
        clear_bit(VHOST_WORK_QUEUED, &work->flags);
        work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev)
{
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
        poll->wqh = NULL;

        vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        __poll_t mask;

        if (poll->wqh)
                return 0;

        mask = vfs_poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
        if (mask & EPOLLERR) {
                vhost_poll_stop(poll);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        if (poll->wqh) {
                remove_wait_queue(poll->wqh, &poll->wait);
                poll->wqh = NULL;
        }
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
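
/*
 * Typical lifecycle for watching a backend file descriptor, as a
 * sketch using only helpers from this file (handle_kick stands for
 * the driver's vhost_work_fn_t callback):
 *
 *      vhost_poll_init(&poll, handle_kick, EPOLLIN, dev);
 *      vhost_poll_start(&poll, file);
 *      ...
 *      vhost_poll_stop(&poll);
 *      vhost_poll_flush(&poll);
 */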

void vhost_work_dev_flush(struct vhost_dev *dev)
{
        struct vhost_flush_struct flush;

        if (dev->worker) {
                init_completion(&flush.wait_event);
                vhost_work_init(&flush.work, vhost_flush_work);

                vhost_work_queue(dev, &flush.work);
                wait_for_completion(&flush.wait_event);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
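
/*
 * Note: the flush works by queueing a sentinel work item and sleeping
 * until the worker completes it; since the worker runs items in FIFO
 * order, every work queued before the flush has finished by then.
 */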

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
        vhost_work_dev_flush(poll->dev);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
        if (!dev->worker)
                return;

        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
                 * test_and_set_bit() implies a memory barrier.
                 */
                llist_add(&work->node, &dev->work_list);
                wake_up_process(dev->worker);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
        return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
        vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
        int j;

        for (j = 0; j < VHOST_NUM_ADDRS; j++)
                vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
        call_ctx->ctx = NULL;
        memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}

bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
        return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);

static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
{
        vq->num = 1;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->private_data = NULL;
        vq->acked_features = 0;
        vq->acked_backend_features = 0;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->kick = NULL;
        vq->log_ctx = NULL;
        vhost_disable_cross_endian(vq);
        vhost_reset_is_le(vq);
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
        vhost_vring_call_reset(&vq->call_ctx);
        __vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
        struct vhost_dev *dev = data;
        struct vhost_work *work, *work_next;
        struct llist_node *node;

        kthread_use_mm(dev->mm);

        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }

                node = llist_del_all(&dev->work_list);
                if (!node)
                        schedule();

                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                smp_wmb();
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);
                        kcov_remote_start_common(dev->kcov_handle);
                        work->fn(work);
                        kcov_remote_stop();
                        if (need_resched())
                                schedule();
                }
        }
        kthread_unuse_mm(dev->mm);
        return 0;
}
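
/*
 * Design note: each device gets one worker kthread. llist_del_all()
 * detaches the whole pending list in a single atomic step, and since
 * llist_add() pushes to the head, the batch is reversed afterwards so
 * work items run in the order they were queued; an empty batch just
 * means the loop slept and will retry.
 */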

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
        kfree(vq->indirect);
        vq->indirect = NULL;
        kfree(vq->log);
        vq->log = NULL;
        kfree(vq->heads);
        vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
        struct vhost_virtqueue *vq;
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                                             GFP_KERNEL);
                vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
                                        GFP_KERNEL);
                vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
                                          GFP_KERNEL);
                if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
        }
        return 0;

err_nomem:
        for (; i >= 0; --i)
                vhost_vq_free_iovecs(dev->vqs[i]);
        return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i)
                vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
                          int pkts, int total_len)
{
        struct vhost_dev *dev = vq->dev;

        if ((dev->byte_weight && total_len >= dev->byte_weight) ||
            pkts >= dev->weight) {
                vhost_poll_queue(&vq->poll);
                return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
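
/*
 * Sketch of the intended caller pattern (modeled on the net backend;
 * pkts and total_len are the caller's own counters): service requests
 * in a loop and bail out once the weight is exceeded, letting the
 * requeued poll reschedule the rest:
 *
 *      do {
 *              ... handle one request, updating total_len ...
 *      } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 */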

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
                                   unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->avail) +
               sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->used) +
               sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        return sizeof(*vq->desc) * num;
}
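
/*
 * The extra two bytes account for the trailing __virtio16 used_event /
 * avail_event slot that follows each ring when VIRTIO_RING_F_EVENT_IDX
 * has been negotiated (see the vhost_used_event()/vhost_avail_event()
 * macros above).
 */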

void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs,
                    int iov_limit, int weight, int byte_weight,
                    bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg))
{
        struct vhost_virtqueue *vq;
        int i;

        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
        dev->log_ctx = NULL;
        dev->umem = NULL;
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
        dev->iov_limit = iov_limit;
        dev->weight = weight;
        dev->byte_weight = byte_weight;
        dev->use_worker = use_worker;
        dev->msg_handler = msg_handler;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->log = NULL;
                vq->indirect = NULL;
                vq->heads = NULL;
                vq->dev = dev;
                mutex_init(&vq->mutex);
                vhost_vq_reset(dev, vq);
                if (vq->handle_kick)
                        vhost_poll_init(&vq->poll, vq->handle_kick,
                                        EPOLLIN, dev);
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
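
/*
 * Illustrative bring-up sequence for a backend (modeled loosely on
 * vhost-net; the kick handler name and the weight values below are
 * placeholders, not this file's API):
 *
 *      vqs[0]->handle_kick = handle_net_kick;
 *      vhost_dev_init(&n->dev, vqs, nvqs, UIO_MAXIOV,
 *                     pkt_weight, byte_weight, true, NULL);
 *      ...
 *      (a later VHOST_SET_OWNER ioctl calls vhost_dev_set_owner())
 */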

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
        struct vhost_work work;
        struct task_struct *owner;
        int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
        struct vhost_attach_cgroups_struct *s;

        s = container_of(work, struct vhost_attach_cgroups_struct, work);
        s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
        struct vhost_attach_cgroups_struct attach;

        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
        vhost_work_dev_flush(dev);
        return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
        return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
        /* No owner, become one */
        if (dev->use_worker) {
                dev->mm = get_task_mm(current);
        } else {
                /* A vDPA device does not use a worker thread, so there's
                 * no need to hold the address space for the mm. This helps
                 * to avoid deadlock in the case of mmap(), which may hold
                 * the refcnt of the file and depend on the release method
                 * to remove the vma.
                 */
                dev->mm = current->mm;
                mmgrab(dev->mm);
        }
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
        if (!dev->mm)
                return;

        if (dev->use_worker)
                mmput(dev->mm);
        else
                mmdrop(dev->mm);

        dev->mm = NULL;
}

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
        struct task_struct *worker;
        int err;

        /* Is there an owner already? */
        if (vhost_dev_has_owner(dev)) {
                err = -EBUSY;
                goto err_mm;
        }

        vhost_attach_mm(dev);

        dev->kcov_handle = kcov_common_handle();
        if (dev->use_worker) {
                worker = kthread_create(vhost_worker, dev,
                                        "vhost-%d", current->pid);
                if (IS_ERR(worker)) {
                        err = PTR_ERR(worker);
                        goto err_worker;
                }

                dev->worker = worker;
                wake_up_process(worker); /* avoid contributing to loadavg */

                err = vhost_attach_cgroups(dev);
                if (err)
                        goto err_cgroup;
        }

        err = vhost_dev_alloc_iovecs(dev);
        if (err)
                goto err_cgroup;

        return 0;
err_cgroup:
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
        }
err_worker:
        vhost_detach_mm(dev);
        dev->kcov_handle = 0;
err_mm:
        return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
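
/*
 * Ownership note: the first process to issue VHOST_SET_OWNER becomes
 * the device owner; its mm is attached for the worker's use, the
 * worker kthread is moved into the caller's cgroups, and privileged
 * ioctls are later checked against this owner via
 * vhost_dev_check_owner().
 */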

static struct vhost_iotlb *iotlb_alloc(void)
{
        return vhost_iotlb_alloc(max_iotlb_entries,
                                 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
        return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
        int i;

        vhost_dev_cleanup(dev);

        dev->umem = umem;
        /* We don't need VQ locks below since vhost_dev_cleanup makes sure
         * VQs aren't running.
         */
        for (i = 0; i < dev->nvqs; ++i)
                dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
                        vhost_poll_stop(&dev->vqs[i]->poll);
                        vhost_poll_flush(&dev->vqs[i]->poll);
                }
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_clear_msg(struct vhost_dev *dev)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&dev->iotlb_lock);

        list_for_each_entry_safe(node, n, &dev->read_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        list_for_each_entry_safe(node, n, &dev->pending_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->error_ctx)
                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
                if (dev->vqs[i]->kick)
                        fput(dev->vqs[i]->kick);
                if (dev->vqs[i]->call_ctx.ctx)
                        eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
                vhost_vq_reset(dev, dev->vqs[i]);
        }
        vhost_dev_free_iovecs(dev);
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
        /* No one will access memory at this point */
        vhost_iotlb_free(dev->umem);
        dev->umem = NULL;
        vhost_iotlb_free(dev->iotlb);
        dev->iotlb = NULL;
        vhost_clear_msg(dev);
        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
        WARN_ON(!llist_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
                dev->kcov_handle = 0;
        }
        vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
        u64 a = addr / VHOST_PAGE_SIZE / 8;

        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
                return false;

        return access_ok(log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
        if (uaddr > ULONG_MAX || size > ULONG_MAX)
                return true;

        if (!size)
                return false;

        return uaddr > ULONG_MAX - size + 1;
}
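
/*
 * Worked example for log_access_ok(): the dirty log is a bitmap with
 * one bit per VHOST_PAGE_SIZE page, so guest address addr maps to
 * bitmap byte addr / VHOST_PAGE_SIZE / 8. With 4 KiB pages, a region
 * of sz = 1 MiB covers 256 pages, and the rounded-up size expression
 * yields (1M + 4K*8 - 1) / 4K / 8 = 32 bytes of log, i.e. 256 bits.
 */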

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
                                int log_all)
{
        struct vhost_iotlb_map *map;

        if (!umem)
                return false;

        list_for_each_entry(map, &umem->list, link) {
                unsigned long a = map->addr;

                if (vhost_overflow(map->addr, map->size))
                        return false;

                if (!access_ok((void __user *)a, map->size))
                        return false;
                else if (log_all && !log_access_ok(log_base,
                                                   map->start,
                                                   map->size))
                        return false;
        }
        return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
                                               u64 addr, unsigned int size,
                                               int type)
{
        const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

        if (!map)
                return NULL;

        return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
                             int log_all)
{
        int i;

        for (i = 0; i < d->nvqs; ++i) {
                bool ok;
                bool log;

                mutex_lock(&d->vqs[i]->mutex);
                log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i]->private_data)
                        ok = vq_memory_access_ok(d->vqs[i]->log_base,
                                                 umem, log);
                else
                        ok = true;
                mutex_unlock(&d->vqs[i]->mutex);
                if (!ok)
                        return false;
        }
        return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
                              const void *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_to_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that all vq
                 * memory can be accessed through the iotlb. So
                 * -EAGAIN should not happen in this case.
                 */
                struct iov_iter t;
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)to, size,
                                     VHOST_ADDR_USED);

                if (uaddr)
                        return __copy_to_user(uaddr, from, size);

                ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_WO);
                if (ret < 0)
                        goto out;
                iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
                ret = copy_to_iter(from, size, &t);
                if (ret == size)
                        ret = 0;
        }
out:
        return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
                                void __user *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_from_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that the vq
                 * memory can be accessed through the iotlb. So
                 * -EAGAIN should not happen in this case.
                 */
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)from, size,
                                     VHOST_ADDR_DESC);
                struct iov_iter f;

                if (uaddr)
                        return __copy_from_user(to, uaddr, size);

                ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_RO);
                if (ret < 0) {
                        vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
                               from, (unsigned long long) size);
                        goto out;
                }
                iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
                ret = copy_from_iter(to, size, &f);
                if (ret == size)
                        ret = 0;
        }

out:
        return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
                                          void __user *addr, unsigned int size,
                                          int type)
{
        int ret;

        ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
                             ARRAY_SIZE(vq->iotlb_iov),
                             VHOST_ACCESS_RO);
        if (ret < 0) {
                vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
                       addr, (unsigned long long) size);
                return NULL;
        }

        if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
                vq_err(vq, "Non atomic userspace memory access: uaddr %p size 0x%llx\n",
                       addr, (unsigned long long) size);
                return NULL;
        }

        return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb prefetch, which means
 * we're sure that the vq memory can be accessed through the iotlb.
 * So -EAGAIN should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
                                            void __user *addr, unsigned int size,
                                            int type)
{
        void __user *uaddr = vhost_vq_meta_fetch(vq,
                             (u64)(uintptr_t)addr, size, type);
        if (uaddr)
                return uaddr;

        return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __put_user(x, ptr); \
        } else { \
                __typeof__(ptr) to = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                          sizeof(*ptr), VHOST_ADDR_USED); \
                if (to != NULL) \
                        ret = __put_user(x, to); \
                else \
                        ret = -EFAULT; \
        } \
        ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
                              vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
                                 struct vring_used_elem *head, int idx,
                                 int count)
{
        return vhost_copy_to_user(vq, vq->used->ring + idx, head,
                                  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
                              &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
                              &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)		\
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __get_user(x, ptr); \
        } else { \
                __typeof__(ptr) from = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                                           sizeof(*ptr), \
                                                           type); \
                if (from != NULL) \
                        ret = __get_user(x, from); \
                else \
                        ret = -EFAULT; \
        } \
        ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                mutex_unlock(&d->vqs[i]->mutex);
}

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
                                      __virtio16 *idx)
{
        return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
                                       __virtio16 *head, int idx)
{
        return vhost_get_avail(vq, *head,
                               &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
                                        __virtio16 *flags)
{
        return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
                                       __virtio16 *event)
{
        return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
                                     __virtio16 *idx)
{
        return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
                                 struct vring_desc *desc, int idx)
{
        return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                                  struct vhost_iotlb_msg *msg)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&d->iotlb_lock);

        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
                        kfree(node);
                }
        }

        spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
        unsigned long a = uaddr;

        /* Make sure 64 bit math will not overflow. */
        if (vhost_overflow(uaddr, size))
                return false;

        if ((access & VHOST_ACCESS_RO) &&
            !access_ok((void __user *)a, size))
                return false;
        if ((access & VHOST_ACCESS_WO) &&
            !access_ok((void __user *)a, size))
                return false;
        return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *msg)
{
        int ret = 0;

        mutex_lock(&dev->mutex);
        vhost_dev_lock_vqs(dev);
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
                                          msg->iova + msg->size - 1,
                                          msg->uaddr, msg->perm)) {
                        ret = -ENOMEM;
                        break;
                }
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                vhost_iotlb_del_range(dev->iotlb, msg->iova,
                                      msg->iova + msg->size - 1);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);

        return ret;
}
1138ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1139 struct iov_iter *from)
1140{
Jason Wang429711a2018-08-06 11:17:47 +08001141 struct vhost_iotlb_msg msg;
1142 size_t offset;
1143 int type, ret;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001144
Jason Wang429711a2018-08-06 11:17:47 +08001145 ret = copy_from_iter(&type, sizeof(type), from);
Pavel Tikhomirov74ad7412018-12-13 17:53:50 +03001146 if (ret != sizeof(type)) {
1147 ret = -EINVAL;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001148 goto done;
Pavel Tikhomirov74ad7412018-12-13 17:53:50 +03001149 }
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001150
Jason Wang429711a2018-08-06 11:17:47 +08001151 switch (type) {
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001152 case VHOST_IOTLB_MSG:
Jason Wang429711a2018-08-06 11:17:47 +08001153 /* There maybe a hole after type for V1 message type,
1154 * so skip it here.
1155 */
1156 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1157 break;
1158 case VHOST_IOTLB_MSG_V2:
1159 offset = sizeof(__u32);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001160 break;
1161 default:
1162 ret = -EINVAL;
Jason Wang429711a2018-08-06 11:17:47 +08001163 goto done;
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001164 }
1165
Jason Wang429711a2018-08-06 11:17:47 +08001166 iov_iter_advance(from, offset);
1167 ret = copy_from_iter(&msg, sizeof(msg), from);
Pavel Tikhomirov74ad7412018-12-13 17:53:50 +03001168 if (ret != sizeof(msg)) {
1169 ret = -EINVAL;
Jason Wang429711a2018-08-06 11:17:47 +08001170 goto done;
Pavel Tikhomirov74ad7412018-12-13 17:53:50 +03001171 }
Jason Wang792a4f22020-03-26 22:01:18 +08001172
1173 if (dev->msg_handler)
1174 ret = dev->msg_handler(dev, &msg);
1175 else
1176 ret = vhost_process_iotlb_msg(dev, &msg);
1177 if (ret) {
Jason Wang429711a2018-08-06 11:17:47 +08001178 ret = -EFAULT;
1179 goto done;
1180 }
1181
1182 ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1183 sizeof(struct vhost_msg_v2);
Jason Wang6b1e6cc2016-06-23 02:04:32 -04001184done:
1185 return ret;
1186}
1187EXPORT_SYMBOL(vhost_chr_write_iter);
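
/*
 * Userspace half of the IOTLB protocol (illustrative; the field
 * values below are placeholders): after read()ing a vhost_msg_v2
 * carrying VHOST_IOTLB_MISS for some iova, the VMM answers through
 * this write path with an update covering the faulting range:
 *
 *      struct vhost_msg_v2 reply = {
 *              .type = VHOST_IOTLB_MSG_V2,
 *              .iotlb = {
 *                      .iova  = miss->iova,
 *                      .size  = region_size,
 *                      .uaddr = (__u64)(uintptr_t)region_va,
 *                      .perm  = miss->perm,
 *                      .type  = VHOST_IOTLB_UPDATE,
 *              },
 *      };
 *      write(vhost_fd, &reply, sizeof(reply));
 */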

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
                        poll_table *wait)
{
        __poll_t mask = 0;

        poll_wait(file, &dev->wait, wait);

        if (!list_empty(&dev->read_list))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
                            int noblock)
{
        DEFINE_WAIT(wait);
        struct vhost_msg_node *node;
        ssize_t ret = 0;
        unsigned size = sizeof(struct vhost_msg);

        if (iov_iter_count(to) < size)
                return 0;

        while (1) {
                if (!noblock)
                        prepare_to_wait(&dev->wait, &wait,
                                        TASK_INTERRUPTIBLE);

                node = vhost_dequeue_msg(dev, &dev->read_list);
                if (node)
                        break;
                if (noblock) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                if (!dev->iotlb) {
                        ret = -EBADFD;
                        break;
                }

                schedule();
        }

        if (!noblock)
                finish_wait(&dev->wait, &wait);

        if (node) {
                struct vhost_iotlb_msg *msg;
                void *start = &node->msg;

                switch (node->msg.type) {
                case VHOST_IOTLB_MSG:
                        size = sizeof(node->msg);
                        msg = &node->msg.iotlb;
                        break;
                case VHOST_IOTLB_MSG_V2:
                        size = sizeof(node->msg_v2);
                        msg = &node->msg_v2.iotlb;
                        break;
                default:
                        BUG();
                        break;
                }

                ret = copy_to_iter(start, size, to);
                if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                        kfree(node);
                        return ret;
                }
                vhost_enqueue_msg(dev, &dev->pending_list, node);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
        struct vhost_dev *dev = vq->dev;
        struct vhost_msg_node *node;
        struct vhost_iotlb_msg *msg;
        bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

        node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
        if (!node)
                return -ENOMEM;

        if (v2) {
                node->msg_v2.type = VHOST_IOTLB_MSG_V2;
                msg = &node->msg_v2.iotlb;
        } else {
                msg = &node->msg.iotlb;
        }

        msg->type = VHOST_IOTLB_MISS;
        msg->iova = iova;
        msg->perm = access;

        vhost_enqueue_msg(dev, &dev->read_list, node);

        return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
                         vring_desc_t __user *desc,
                         vring_avail_t __user *avail,
                         vring_used_t __user *used)
{
        /* If an IOTLB device is present, the vring addresses are
         * GIOVAs. Access validation occurs at prefetch time. */
        if (vq->iotlb)
                return true;

        return access_ok(desc, vhost_get_desc_size(vq, num)) &&
               access_ok(avail, vhost_get_avail_size(vq, num)) &&
               access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
                                 const struct vhost_iotlb_map *map,
                                 int type)
{
        int access = (type == VHOST_ADDR_USED) ?
                     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

        if (likely(map->perm & access))
                vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
                            int access, u64 addr, u64 len, int type)
{
        const struct vhost_iotlb_map *map;
        struct vhost_iotlb *umem = vq->iotlb;
        u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

        if (vhost_vq_meta_fetch(vq, addr, len, type))
                return true;

        while (len > s) {
                map = vhost_iotlb_itree_first(umem, addr, last);
                if (map == NULL || map->start > addr) {
                        vhost_iotlb_miss(vq, addr, access);
                        return false;
                } else if (!(map->perm & access)) {
                        /* Report the possible access violation by
                         * requesting another translation from userspace.
                         */
                        return false;
                }

                size = map->size - addr + map->start;

                if (orig_addr == addr && size >= len)
                        vhost_vq_meta_update(vq, map, type);

                s += size;
                addr += size;
        }

        return true;
}
1358
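/* Prefetch and validate the IOTLB mappings of all three vring areas so
 * the hot path can use the cached translations. Returns 1 when no
 * IOTLB is in use or all lookups succeeded, 0 when a translation is
 * still outstanding. */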
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
				  void __user *log_base,
				  bool log_used,
				  u64 log_addr)
{
	/* If an IOTLB device is present, log_addr is a GIOVA that
	 * will never be logged by log_used(). */
	if (vq->iotlb)
		return true;

	return !log_used || log_access_ok(log_base, log_addr,
					  vhost_get_used_size(vq, vq->num));
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

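/* VHOST_SET_MEM_TABLE handler: copy the memory-region table from
 * userspace, rebuild it as an IOTLB of RW mappings, validate it, and
 * swap it in under each vq mutex before freeing the old table. */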
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_iotlb *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			  GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   flex_array_size(newmem, regions, mem.nregions))) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = iotlb_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_iotlb_add_range(newumem,
					  region->guest_phys_addr,
					  region->guest_phys_addr +
					  region->memory_size - 1,
					  region->userspace_addr,
					  VHOST_MAP_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_iotlb_free(oldumem);
	return 0;

err:
	vhost_iotlb_free(newumem);
	kvfree(newmem);
	return -EFAULT;
}

static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{
	struct vhost_vring_state s;

	/* Resizing ring with an active backend?
	 * You don't want to do that. */
	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof s))
		return -EFAULT;

	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
		return -EINVAL;
	vq->num = s.num;

	return 0;
}

static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{
	struct vhost_vring_addr a;

	if (copy_from_user(&a, argp, sizeof a))
		return -EFAULT;
	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
		return -EOPNOTSUPP;

	/* For 32bit, verify that the top 32bits of the user
	 * data are set to zero. */
	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
		return -EFAULT;

	/* Make sure it's safe to cast pointers to vring types. */
	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
		return -EINVAL;

	/* We only verify access here if backend is configured.
	 * If it is not, we don't as size might not have been setup.
	 * We will verify when backend is configured. */
	if (vq->private_data) {
		if (!vq_access_ok(vq, vq->num,
			(void __user *)(unsigned long)a.desc_user_addr,
			(void __user *)(unsigned long)a.avail_user_addr,
			(void __user *)(unsigned long)a.used_user_addr))
			return -EINVAL;

		/* Also validate log access for used ring if enabled. */
		if (!vq_log_used_access_ok(vq, vq->log_base,
				a.flags & (0x1 << VHOST_VRING_F_LOG),
				a.log_guest_addr))
			return -EINVAL;
	}

	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
	vq->log_addr = a.log_guest_addr;
	vq->used = (void __user *)(unsigned long)a.used_user_addr;

	return 0;
}

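/* VHOST_SET_VRING_NUM and VHOST_SET_VRING_ADDR are dispatched here so
 * both take the vq mutex around the update. */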
static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		BUG();
	}

	mutex_unlock(&vq->mutex);

	return r;
}

long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, d->nvqs);
	vq = d->vqs[idx];

	if (ioctl == VHOST_SET_VRING_NUM ||
	    ioctl == VHOST_SET_VRING_ADDR) {
		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
	}

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}

		swap(ctx, vq->call_ctx.ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_iotlb *niotlb, *oiotlb;
	int i;

	niotlb = iotlb_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->iotlb = niotlb;
		__vhost_vq_meta_reset(vq);
		mutex_unlock(&vq->mutex);
	}

	vhost_iotlb_free(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	unpin_user_pages_dirty_lock(&page, 1, true);
	return 0;
}

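/* Set dirty bits in the userspace log bitmap for a write of
 * write_length bytes at write_address. Each bit covers one
 * VHOST_PAGE_SIZE page; for example, with a 4096-byte page size a
 * write at address 0x3002 dirties page 3, i.e. bit 3 of the first
 * byte of the bitmap. */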
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}

static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
	struct vhost_iotlb *umem = vq->umem;
	struct vhost_iotlb_map *u;
	u64 start, end, l, min;
	int r;
	bool hit = false;

	while (len) {
		min = len;
		/* More than one GPA can be mapped into a single HVA. So
		 * iterate all possible umems here to be safe.
		 */
		list_for_each_entry(u, &umem->list, link) {
			if (u->addr > hva - 1 + len ||
			    u->addr - 1 + u->size < hva)
				continue;
			start = max(u->addr, hva);
			end = min(u->addr - 1 + u->size, hva - 1 + len);
			l = end - start + 1;
			r = log_write(vq->log_base,
				      u->start + start - u->addr,
				      l);
			if (r < 0)
				return r;
			hit = true;
			min = min(l, min);
		}

		if (!hit)
			return -EFAULT;

		len -= min;
		hva += min;
	}

	return 0;
}

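/* Log a write of len bytes at used-ring offset used_offset. Without an
 * IOTLB the used ring lives at the guest address vq->log_addr, so the
 * log bitmap can be updated directly; with an IOTLB the used ring
 * address must first be translated back to host ranges and each
 * resulting chunk logged via log_write_hva(). */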
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
	struct iovec *iov = vq->log_iov;
	int i, ret;

	if (!vq->iotlb)
		return log_write(vq->log_base, vq->log_addr + used_offset, len);

	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
			     len, iov, 64, VHOST_ACCESS_WO);
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++) {
		ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
				    iov[i].iov_len);
		if (ret)
			return ret;
	}

	return 0;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len, struct iovec *iov, int count)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();

	if (vq->iotlb) {
		for (i = 0; i < count; i++) {
			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
					  iov[i].iov_len);
			if (r < 0)
				return r;
		}
		return 0;
	}

	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

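/* Write the current used_flags out to the guest-visible used ring and
 * log the write when dirty logging is active. */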
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_used_flags(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_avail_event(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

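/* Translate a guest/IOVA address range into the host userspace iovecs
 * that back it, using the device IOTLB when one is configured and the
 * memory table otherwise. Returns the number of iovecs filled in, or a
 * negative errno; -EAGAIN means an IOTLB miss was reported and the
 * caller should retry once userspace has serviced it. */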
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_iotlb_map *map;
	struct vhost_dev *dev = vq->dev;
	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
		if (map == NULL || map->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(map->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = map->size - addr + map->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
				 (map->addr + addr - map->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
	return next;
}

static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);
	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}

/* This looks in the virtqueue for the first available buffer, and
 * converts it to an iovec for convenient access. Since descriptors
 * consist of some number of output then some number of input
 * descriptors, it's actually two iovecs, but we pack them into one
 * and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;

	if (vq->avail_idx == vq->last_avail_idx) {
		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
			vq_err(vq, "Failed to access avail idx at %p\n",
			       &vq->avail->idx);
			return -EFAULT;
		}
		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
			vq_err(vq, "Guest moved avail index from %u to %u",
			       last_avail_idx, vq->avail_idx);
			return -EFAULT;
		}

		/* If there's nothing new since last we looked, return
		 * invalid.
		 */
		if (vq->avail_idx == last_avail_idx)
			return vq->num;

		/* Only get avail ring entries after they have been
		 * exposed by guest.
		 */
		smp_rmb();
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_get_desc(vq, &desc, i);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	vring_used_elem_t __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (vhost_put_used(vq, heads, start, count)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}

/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_used_idx(vq)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_used(vq, offsetof(struct vring_used, idx),
			 sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);

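/* Decide whether the guest needs an interrupt for the latest used-ring
 * update: with VIRTIO_RING_F_EVENT_IDX this compares the new used index
 * against the guest's used_event threshold, otherwise it honours the
 * VRING_AVAIL_F_NO_INTERRUPT flag. Errs on the side of signalling. */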
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;
	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail_flags(vq, &flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_used_event(vq, &event)) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx.ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	r = vhost_get_avail_idx(vq, &avail_idx);
	if (unlikely(r))
		return false;
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_avail_idx(vq, &avail_idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);

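/*
 * Illustrative sketch (assumed caller pattern, in the style of
 * vhost-net): a worker that finds the ring empty re-enables guest
 * kicks, and the return value closes the race with a guest that
 * slipped buffers in meanwhile.
 *
 *	if (head == vq->num) {
 *		if (unlikely(vhost_enable_notify(dev, vq))) {
 *			// New buffers arrived after all: suppress
 *			// kicks again and keep processing.
 *			vhost_disable_notify(dev, vq);
 *			continue;
 *		}
 *		break;	// really empty: sleep until the next kick
 *	}
 */
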
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
	if (!node)
		return NULL;

	/* Make sure all padding within the structure is initialized. */
	memset(&node->msg, 0, sizeof node->msg);
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);

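/*
 * Illustrative sketch of how the three message helpers cooperate,
 * loosely modeled on this file's IOTLB miss path (error handling and
 * most fields abbreviated; field names follow the uapi vhost_msg
 * layout):
 *
 *	node = vhost_new_msg(vq, VHOST_IOTLB_MSG_V2);
 *	if (!node)
 *		return -ENOMEM;
 *	node->msg.iotlb.type = VHOST_IOTLB_MISS;
 *	node->msg.iotlb.iova = iova;
 *	node->msg.iotlb.perm = access;
 *	vhost_enqueue_msg(dev, &dev->read_list, node);
 *
 * Userspace then reads the miss through the vhost chardev, whose read
 * path pops it with vhost_dequeue_msg(dev, &dev->read_list).
 */
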
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_backend_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);

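/*
 * Illustrative sketch (assumed ioctl-handler usage, modeled on
 * vhost-net's VHOST_SET_BACKEND_FEATURES handling; the supported-mask
 * name MY_DEV_BACKEND_FEATURES is hypothetical):
 *
 *	if (copy_from_user(&features, featurep, sizeof(features)))
 *		return -EFAULT;
 *	if (features & ~MY_DEV_BACKEND_FEATURES)
 *		return -EOPNOTSUPP;
 *	vhost_set_backend_features(&n->dev, features);
 *	return 0;
 */
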
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");