/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>

#include "vhost.h"

enum {
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};

static unsigned vhost_zcopy_mask __read_mostly;

#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])

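/* poll_table callback: remember the wait queue head and add our wait queue
 * entry to it, so vhost_poll_wakeup() runs when the file becomes ready. */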
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

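/* Wait queue callback: ignore events we are not polling for, otherwise hand
 * the poll's work item off to the vhost worker thread. */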
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

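/* Prepare a work item for queueing on the vhost worker thread. */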
static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	work->queue_seq = work->done_seq = 0;
}

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;

	vhost_work_init(&poll->work, fn);
}

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	remove_wait_queue(poll->wqh, &poll->wait);
}

static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
				unsigned seq)
{
	int left;

	spin_lock_irq(&dev->work_lock);
	left = seq - work->done_seq;
	spin_unlock_irq(&dev->work_lock);
	return left <= 0;
}

static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int flushing;

	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);
	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}

static inline void vhost_work_queue(struct vhost_dev *dev,
				    struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		wake_up_process(dev->worker);
	}
	spin_unlock_irqrestore(&dev->work_lock, flags);
}

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}

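/* Return a virtqueue to its initial state: rings unmapped, backend and
 * eventfds detached, and all indices cleared. */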
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->vhost_hlen = 0;
	vq->sock_hlen = 0;
	vq->private_data = NULL;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vq->upend_idx = 0;
	vq->done_idx = 0;
	vq->ubufs = NULL;
}

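/* The vhost worker thread: runs queued work items one at a time in the
 * owner's mm context and records completion sequence numbers so that
 * vhost_work_flush() can wait for them. */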
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);

	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		} else
			schedule();

	}
	unuse_mm(dev->mm);
	return 0;
}

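/* Free the iovec buffers of a single virtqueue allocated by
 * vhost_dev_alloc_iovecs(). */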
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
	kfree(vq->ubuf_info);
	vq->ubuf_info = NULL;
}

void vhost_enable_zcopy(int vq)
{
	vhost_zcopy_mask |= 0x1 << vq;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	int i;
	bool zcopy;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
					       UIO_MAXIOV, GFP_KERNEL);
		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
					  GFP_KERNEL);
		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
					    UIO_MAXIOV, GFP_KERNEL);
		zcopy = vhost_zcopy_mask & (0x1 << i);
		if (zcopy)
			dev->vqs[i].ubuf_info =
				kmalloc(sizeof *dev->vqs[i].ubuf_info *
					UIO_MAXIOV, GFP_KERNEL);
		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
			!dev->vqs[i].heads ||
			(zcopy && !dev->vqs[i].ubuf_info))
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(&dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(&dev->vqs[i]);
}

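/* Initialize device and per-virtqueue state. The iovec buffers are
 * allocated later, when an owner is set. */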
long vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue *vqs, int nvqs)
{
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->memory = NULL;
	dev->mm = NULL;
	spin_lock_init(&dev->work_lock);
	INIT_LIST_HEAD(&dev->work_list);
	dev->worker = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].log = NULL;
		dev->vqs[i].indirect = NULL;
		dev->vqs[i].heads = NULL;
		dev->vqs[i].ubuf_info = NULL;
		dev->vqs[i].dev = dev;
		mutex_init(&dev->vqs[i].mutex);
		vhost_vq_reset(dev, dev->vqs + i);
		if (dev->vqs[i].handle_kick)
			vhost_poll_init(&dev->vqs[i].poll,
					dev->vqs[i].handle_kick, POLLIN, dev);
	}

	return 0;
}

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (dev->mm) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}

/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
	struct vhost_memory *memory;

	/* Restore memory to default empty mapping. */
	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
	if (!memory)
		return -ENOMEM;

	vhost_dev_cleanup(dev);

	memory->nregions = 0;
	RCU_INIT_POINTER(dev->memory, memory);
	return 0;
}

/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
{
	int i;
	int j = 0;

	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			vhost_add_used_and_signal(vq->dev, vq,
						  vq->heads[i].id, 0);
			++j;
		} else
			break;
	}
	if (j)
		vq->done_idx = i;
	return j;
}

/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
			vhost_poll_stop(&dev->vqs[i].poll);
			vhost_poll_flush(&dev->vqs[i].poll);
		}
		/* Wait for all lower device DMAs done. */
		if (dev->vqs[i].ubufs)
			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);

		/* Signal guest as appropriate. */
		vhost_zerocopy_signal_used(&dev->vqs[i]);

		if (dev->vqs[i].error_ctx)
			eventfd_ctx_put(dev->vqs[i].error_ctx);
		if (dev->vqs[i].error)
			fput(dev->vqs[i].error);
		if (dev->vqs[i].kick)
			fput(dev->vqs[i].kick);
		if (dev->vqs[i].call_ctx)
			eventfd_ctx_put(dev->vqs[i].call_ctx);
		if (dev->vqs[i].call)
			fput(dev->vqs[i].call);
		vhost_vq_reset(dev, dev->vqs + i);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(rcu_dereference_protected(dev->memory,
					lockdep_is_held(&dev->mutex)));
	RCU_INIT_POINTER(dev->memory, NULL);
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}

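/* Check that userspace can write the part of the dirty log bitmap that
 * covers sz bytes starting at guest physical address addr. */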
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	if (!mem)
		return 0;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		mutex_lock(&d->vqs[i].mutex);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i].private_data)
			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
						 log_all);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i].mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

static int vq_access_ok(struct vhost_dev *d, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	struct vhost_memory *mp;

	mp = rcu_dereference_protected(dev->memory,
				       lockdep_is_held(&dev->mutex));
	return memory_access_ok(dev, mp, 1);
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	struct vhost_memory *mp;
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	mp = rcu_dereference_protected(vq->dev->memory,
				       lockdep_is_held(&vq->mutex));
	return vq_memory_access_ok(log_base, mp,
			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq->dev, vq, vq->log_base);
}

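/* VHOST_SET_MEM_TABLE: copy in the new memory table, validate it, then
 * publish it with RCU and free the old table. */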
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
		return -E2BIG;
	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kfree(newmem);
		return -EFAULT;
	}

	if (!memory_access_ok(d, newmem,
			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
		kfree(newmem);
		return -EFAULT;
	}
	oldmem = rcu_dereference_protected(d->memory,
					   lockdep_is_held(&d->mutex));
	rcu_assign_pointer(d->memory, newmem);
	synchronize_rcu();
	kfree(oldmem);
	return 0;
}

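/* Handle the per-virtqueue ioctls: ring size and base, ring addresses,
 * and the kick/call/error eventfds. */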
static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL,
		    *pollstart = NULL, *pollstop = NULL;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs + idx;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}
		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(d, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = filep = vq->kick;
			pollstart = vq->kick = eventfp;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs + i;
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(d, vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i].mutex);
			d->vqs[i].log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i].mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = vhost_set_vring(d, ioctl, argp);
		break;
	}
done:
	return r;
}

static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	struct vhost_memory_region *reg;
	int i;

	/* linear search is not brilliant, but we really have on the order of 6
	 * regions in practice */
	for (i = 0; i < mem->nregions; ++i) {
		reg = mem->regions + i;
		if (reg->guest_phys_addr <= addr &&
		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
			return reg;
	}
	return NULL;
}

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page, KM_USER0);
	set_bit(bit, base);
	kunmap_atomic(base, KM_USER0);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}

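/* Write used_flags out to the used ring, logging the write if dirty
 * logging is enabled. */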
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

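/* Sync used-ring state once a backend is present: publish used_flags and
 * read back the current used index from the ring. */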
int vhost_init_used(struct vhost_virtqueue *vq)
{
	int r;
	if (!vq->private_data)
		return 0;

	r = vhost_update_used_flags(vq);
	if (r)
		return r;
	vq->signalled_used_valid = false;
	return get_user(vq->last_used_idx, &vq->used->idx);
}

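/* Translate a guest physical address range into a userspace iovec using
 * the memory table. Returns the number of iovec entries used, or a
 * negative error. */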
static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	rcu_read_lock();

	mem = rcu_dereference(dev->memory);
	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	rcu_read_unlock();
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = desc->next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}

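/* Fetch an indirect descriptor table and translate each of its descriptors
 * into iov entries, counting out and in descriptors separately and
 * recording log entries for writable ones. */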
static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (unlikely(indirect->len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     UIO_MAXIOV);
	if (unlikely(ret < 0)) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
					      vq->indirect, sizeof desc))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);
	return 0;
}

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access. Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(__get_user(head,
				&vq->avail->ring[last_avail_idx % vq->num]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return ret;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}

/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem __user *used;

	/* The virtqueue contains a ring of used buffers. Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	if (__put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (__put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  sizeof *used);
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	vq->last_used_idx++;
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely(vq->last_used_idx == vq->signalled_used))
		vq->signalled_used_valid = false;
	return 0;
}

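/* Copy a batch of used elements into the used ring and advance
 * last_used_idx; the caller writes the used index out to the guest
 * afterwards. */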
David Stevens8dd014a2010-07-27 18:52:21 +03001381static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1382 struct vring_used_elem *heads,
1383 unsigned count)
1384{
1385 struct vring_used_elem __user *used;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001386 u16 old, new;
David Stevens8dd014a2010-07-27 18:52:21 +03001387 int start;
1388
1389 start = vq->last_used_idx % vq->num;
1390 used = vq->used->ring + start;
Michael S. Tsirkindfe5ac52010-09-21 14:18:01 +02001391 if (__copy_to_user(used, heads, count * sizeof *used)) {
David Stevens8dd014a2010-07-27 18:52:21 +03001392 vq_err(vq, "Failed to write used");
1393 return -EFAULT;
1394 }
1395 if (unlikely(vq->log_used)) {
1396 /* Make sure data is seen before log. */
1397 smp_wmb();
1398 /* Log used ring entry write. */
1399 log_write(vq->log_base,
1400 vq->log_addr +
1401 ((void __user *)used - (void __user *)vq->used),
1402 count * sizeof *used);
1403 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001404 old = vq->last_used_idx;
1405 new = (vq->last_used_idx += count);
1406 /* If the driver never bothers to signal in a very long while,
1407 * used index might wrap around. If that happens, invalidate
1408 * signalled_used index we stored. TODO: make sure driver
1409 * signals at least once in 2^16 and remove this. */
1410 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1411 vq->signalled_used_valid = false;
David Stevens8dd014a2010-07-27 18:52:21 +03001412 return 0;
1413}
1414
1415/* After we've used one of their buffers, we tell them about it. We'll then
1416 * want to notify the guest, using eventfd. */
1417int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1418 unsigned count)
1419{
1420 int start, n, r;
1421
1422 start = vq->last_used_idx % vq->num;
1423 n = vq->num - start;
1424 if (n < count) {
1425 r = __vhost_add_used_n(vq, heads, n);
1426 if (r < 0)
1427 return r;
1428 heads += n;
1429 count -= n;
1430 }
1431 r = __vhost_add_used_n(vq, heads, count);
1432
1433 /* Make sure buffer is written before we update index. */
1434 smp_wmb();
1435 if (put_user(vq->last_used_idx, &vq->used->idx)) {
1436 vq_err(vq, "Failed to increment used idx");
1437 return -EFAULT;
1438 }
1439 if (unlikely(vq->log_used)) {
1440 /* Log used index update. */
1441 log_write(vq->log_base,
1442 vq->log_addr + offsetof(struct vring_used, idx),
1443 sizeof vq->used->idx);
1444 if (vq->log_ctx)
1445 eventfd_signal(vq->log_ctx, 1);
1446 }
1447 return r;
1448}
1449
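/*
 * A minimal illustrative sketch, not from this file: the arithmetic
 * vhost_add_used_n() uses above to split a batch that runs past the end
 * of the used ring into two copies.  split_batch() and the example ring
 * size are hypothetical.
 */
#include <stdio.h>

static void split_batch(unsigned int last_used_idx, unsigned int num,
			unsigned int count)
{
	unsigned int start = last_used_idx % num;
	unsigned int room = num - start;	/* slots left before the ring end */

	if (room < count)
		printf("copy %u elems at slot %u, then %u more at slot 0\n",
		       room, start, count - room);
	else
		printf("copy %u elems at slot %u\n", count, start);
}

int main(void)
{
	split_batch(250, 256, 4);	/* fits: one copy of 4 at slot 250 */
	split_batch(254, 256, 6);	/* wraps: 2 at slot 254, then 4 at slot 0 */
	return 0;
}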
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001450static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001451{
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001452 __u16 old, new, event;
1453 bool v;
Michael S. Tsirkin0d499352010-05-11 19:44:17 +03001454 /* Flush out used index updates. This is paired
1455 * with the barrier that the Guest executes when enabling
1456 * interrupts. */
1457 smp_mb();
1458
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001459 if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1460 unlikely(vq->avail_idx == vq->last_avail_idx))
1461 return true;
1462
1463 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1464 __u16 flags;
1465 if (__get_user(flags, &vq->avail->flags)) {
1466 vq_err(vq, "Failed to get flags");
1467 return true;
1468 }
1469 return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001470 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001471 old = vq->signalled_used;
1472 v = vq->signalled_used_valid;
1473 new = vq->signalled_used = vq->last_used_idx;
1474 vq->signalled_used_valid = true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001475
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001476 if (unlikely(!v))
1477 return true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001478
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001479 if (get_user(event, vhost_used_event(vq))) {
1480 vq_err(vq, "Failed to get used event idx");
1481 return true;
1482 }
1483 return vring_need_event(event, new, old);
1484}
1485
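/*
 * A note on vring_need_event(), used just above: it is defined in
 * include/linux/virtio_ring.h, not here.  The standalone copy below is
 * what that helper amounts to at the time of this code, to the best of
 * my knowledge; treat it as an assumption rather than a quote.  It asks
 * whether the window (old, new] of freshly used entries has crossed the
 * event index at which the guest asked to be signalled, again with
 * free-running 16-bit arithmetic.
 */
#include <stdint.h>

static inline int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}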
1486/* This actually signals the guest, using eventfd. */
1487void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1488{
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001489	/* Signal the Guest to tell them we've used something up. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001490 if (vq->call_ctx && vhost_notify(dev, vq))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001491 eventfd_signal(vq->call_ctx, 1);
1492}
1493
1494/* And here's the combo meal deal. Supersize me! */
1495void vhost_add_used_and_signal(struct vhost_dev *dev,
1496 struct vhost_virtqueue *vq,
1497 unsigned int head, int len)
1498{
1499 vhost_add_used(vq, head, len);
1500 vhost_signal(dev, vq);
1501}
1502
David Stevens8dd014a2010-07-27 18:52:21 +03001503/* multi-buffer version of vhost_add_used_and_signal */
1504void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1505 struct vhost_virtqueue *vq,
1506 struct vring_used_elem *heads, unsigned count)
1507{
1508 vhost_add_used_n(vq, heads, count);
1509 vhost_signal(dev, vq);
1510}
1511
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001512/* OK, now we need to know about added descriptors. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001513bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001514{
1515 u16 avail_idx;
1516 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301517
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001518 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1519 return false;
1520 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001521 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08001522 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001523 if (r) {
1524 vq_err(vq, "Failed to enable notification at %p: %d\n",
1525 &vq->used->flags, r);
1526 return false;
1527 }
1528 } else {
Jason Wang2723fea2011-06-21 18:04:38 +08001529 r = vhost_update_avail_event(vq, vq->avail_idx);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001530 if (r) {
1531 vq_err(vq, "Failed to update avail event index at %p: %d\n",
1532 vhost_avail_event(vq), r);
1533 return false;
1534 }
1535 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001536 /* They could have slipped one in as we were doing that: make
1537 * sure it's written, then check again. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00001538 smp_mb();
Michael S. Tsirkin8b7347a2010-09-19 15:56:30 +02001539 r = __get_user(avail_idx, &vq->avail->idx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001540 if (r) {
1541 vq_err(vq, "Failed to check avail idx at %p: %d\n",
1542 &vq->avail->idx, r);
1543 return false;
1544 }
1545
David Stevens8dd014a2010-07-27 18:52:21 +03001546 return avail_idx != vq->avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001547}
1548
1549/* We don't need to be notified again. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001550void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001551{
1552 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301553
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001554 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1555 return;
1556 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001557 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08001558 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001559 if (r)
 1560			vq_err(vq, "Failed to disable notification at %p: %d\n",
1561 &vq->used->flags, r);
1562 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001563}
Michael S. Tsirkinbab632d2011-07-18 03:48:46 +00001564
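/*
 * A minimal illustrative sketch, not from this file: the pattern a backend
 * such as vhost-net is expected to follow with the two helpers above --
 * keep guest kicks disabled while draining the ring, re-enable on empty,
 * and re-check to close the race described in vhost_enable_notify().
 * fetch_descriptor() is hypothetical; the vhost_* calls match the
 * signatures defined in this file.
 */
static bool fetch_descriptor(struct vhost_virtqueue *vq, unsigned int *head,
			     int *len);		/* hypothetical helper */

static void handle_vq_sketch(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int head;
	int len;

	vhost_disable_notify(dev, vq);		/* no kicks while we drain */
	for (;;) {
		if (!fetch_descriptor(vq, &head, &len)) {
			/* Ring looked empty: re-open notifications, then
			 * re-check in case the guest slipped a buffer in. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ...consume the buffer, set len to the bytes written... */
		vhost_add_used_and_signal(dev, vq, head, len);
	}
}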
1565static void vhost_zerocopy_done_signal(struct kref *kref)
1566{
1567 struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
1568 kref);
1569 wake_up(&ubufs->wait);
1570}
1571
1572struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
1573 bool zcopy)
1574{
1575 struct vhost_ubuf_ref *ubufs;
1576 /* No zero copy backend? Nothing to count. */
1577 if (!zcopy)
1578 return NULL;
1579 ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
1580 if (!ubufs)
1581 return ERR_PTR(-ENOMEM);
1582 kref_init(&ubufs->kref);
Michael S. Tsirkinbab632d2011-07-18 03:48:46 +00001583 init_waitqueue_head(&ubufs->wait);
1584 ubufs->vq = vq;
1585 return ubufs;
1586}
1587
1588void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
1589{
1590 kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1591}
1592
1593void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
1594{
1595 kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1596 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1597 kfree(ubufs);
1598}
1599
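/*
 * A minimal illustrative sketch, not from this file: how the reference
 * count above is meant to be used by a zero-copy transmit path.  One
 * reference is taken per buffer the device may still be reading from;
 * vhost_zerocopy_callback() (just below) drops it when the DMA finishes,
 * and vhost_ubuf_put_and_wait() drops the initial reference taken by
 * vhost_ubuf_alloc() and waits for the rest at teardown.  The ubuf_info
 * field assignments here follow the vhost-net side and are assumptions,
 * not taken from this file.
 */
static void zerocopy_tx_one_sketch(struct vhost_ubuf_ref *ubufs,
				   struct ubuf_info *ubuf, int desc_idx)
{
	kref_get(&ubufs->kref);			/* one ref per in-flight DMA */
	ubuf->callback = vhost_zerocopy_callback;
	ubuf->arg = ubufs;			/* handed back to the callback */
	ubuf->desc = desc_idx;			/* which vq->heads[] slot to complete */
	/* ...attach ubuf to the outgoing skb and hand it to the device;
	 * on teardown the backend calls vhost_ubuf_put_and_wait(ubufs). */
}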
1600void vhost_zerocopy_callback(void *arg)
1601{
1602 struct ubuf_info *ubuf = arg;
1603 struct vhost_ubuf_ref *ubufs = ubuf->arg;
1604 struct vhost_virtqueue *vq = ubufs->vq;
1605
 1606	/* set len to mark this descriptor's buffers as having completed DMA */
1607 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1608 kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1609}