// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

/**
 * vringh_kiov_advance - skip bytes from vringh_kiov
 * @iov: an iov passed to vringh_getdesc_*() (updated as we consume)
 * @len: the maximum length to advance
 */
void vringh_kiov_advance(struct vringh_kiov *iov, size_t len)
{
	while (len && iov->i < iov->used) {
		size_t partlen = min(iov->iov[iov->i].iov_len, len);

		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}

		len -= partlen;
	}
}
EXPORT_SYMBOL(vringh_kiov_advance);
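
/*
 * Usage sketch (illustrative, not part of the original file): dropping a
 * fixed-size header from a descriptor chain before pulling the payload.
 * "vrh", "struct my_hdr", "buf" and "buflen" are hypothetical caller state.
 *
 *	struct vringh_kiov riov;
 *	u16 head;
 *	int err = vringh_getdesc_kern(&vrh, &riov, NULL, &head, GFP_KERNEL);
 *
 *	if (err == 1) {
 *		vringh_kiov_advance(&riov, sizeof(struct my_hdr));
 *		err = vringh_iov_pull_kern(&riov, buf, buflen);
 *	}
 */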

/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;

		vringh_kiov_advance(iov, partlen);
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
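
/*
 * Sketch of a getrange() callback (illustrative, not from the original
 * file): the caller supplies one to vringh_getdesc_user() to say which
 * guest address ranges are valid and how they map into its own address
 * space.  r->offset is what gets added to a guest address to obtain the
 * address actually dereferenced.  "my_region" and its fields are
 * hypothetical.
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		if (addr < my_region.guest_base ||
 *		    addr >= my_region.guest_base + my_region.size)
 *			return false;
 *
 *		r->start = my_region.guest_base;
 *		r->end_incl = r->start + my_region.size - 1;
 *		r->offset = my_region.user_base - my_region.guest_base;
 *		return true;
 *	}
 */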

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", desc->len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

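/*
 * Added commentary: iov->max_num packs the VRINGH_IOV_ALLOCATED flag into
 * the count, so the usable capacity is always
 * (max_num & ~VRINGH_IOV_ALLOCATED).  The flag records whether iov->iov
 * was allocated here (and so may be krealloc'ed/kfree'd) or is a
 * caller-provided array that must first be copied into a fresh allocation
 * before it can grow.
 */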
static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc_array(iov->iov, new_num,
				     sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = riov->consumed = 0;
	if (wiov)
		wiov->i = wiov->used = wiov->consumed = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->used)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}


static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);
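
/*
 * Usage sketch (illustrative, not from the original file): setting up a
 * vringh over a ring whose pages live in userspace.  "uaddr", "num" and
 * "features" are hypothetical; vring_init() is only used to compute the
 * standard layout offsets from the base address.
 *
 *	struct vring vring;
 *	struct vringh vrh;
 *	int err;
 *
 *	vring_init(&vring, num, uaddr, PAGE_SIZE);
 *	err = vringh_init_user(&vrh, features, num, true,
 *			       (vring_desc_t __user *)vring.desc,
 *			       (vring_avail_t __user *)vring.avail,
 *			       (vring_used_t __user *)vring.used);
 */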

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 * When you don't need riov and wiov anymore, you should clean them up by
 * calling vringh_iov_cleanup() to release the memory, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
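
/*
 * Usage sketch (illustrative, not from the original file): a typical
 * service loop over a userspace ring.  "vrh", "my_getrange", "req" and
 * "resp" are hypothetical caller state; error handling is elided.
 *
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *
 *	while ((err = vringh_getdesc_user(&vrh, &riov, &wiov,
 *					  my_getrange, &head)) == 1) {
 *		vringh_iov_pull_user(&riov, &req, sizeof(req));
 *		... process the request ...
 *		vringh_iov_push_user(&wiov, &resp, sizeof(resp));
 *		vringh_complete_user(&vrh, head, sizeof(resp));
 *	}
 *
 *	if (vringh_need_notify_user(&vrh) > 0)
 *		... kick the guest ...
 *
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */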

/**
 * vringh_iov_pull_user - copy bytes from vringh_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vringh_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
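
/*
 * Usage sketch (illustrative, not from the original file): the standard
 * "disable, drain, re-enable, re-check" pattern before sleeping, which
 * closes the race with the guest adding a buffer just as we stop polling.
 * A false return from vringh_notify_enable_user() means more buffers
 * became available, so we keep processing.
 *
 *	vringh_notify_disable_user(&vrh);
 *	... drain the ring with vringh_getdesc_user() ...
 *
 *	if (!vringh_notify_enable_user(&vrh)) {
 *		vringh_notify_disable_user(&vrh);
 *		... drain again before sleeping ...
 *	}
 */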

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 * When you don't need riov and wiov anymore, you should clean them up by
 * calling vringh_kiov_cleanup() to release the memory, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
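
/*
 * Usage sketch (illustrative, not from the original file): servicing a
 * kernelspace ring, e.g. from a vhost-style worker.  "vrh", "req" and
 * "resp" are hypothetical; error handling is elided.
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1) {
 *		vringh_iov_pull_kern(&riov, &req, sizeof(req));
 *		... process the request ...
 *		vringh_iov_push_kern(&wiov, &resp, sizeof(resp));
 *		vringh_complete_kern(&vrh, head, sizeof(resp));
 *	}
 *
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		... signal the other side ...
 *
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */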

/**
 * vringh_iov_pull_kern - copy bytes from vringh_kiov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vringh_kiov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_kern() to undo).
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

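/*
 * Added commentary: translate [addr, addr + len) through the vring's
 * IOTLB into an array of bio_vecs, one per contiguous mapping, checking
 * @perm (VHOST_MAP_RO/VHOST_MAP_WO) on each map.  Returns the number of
 * bio_vecs filled in, -ENOBUFS if iov[] is too small, -EINVAL on a hole
 * in the IOTLB, or -EPERM on a permission mismatch.
 */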
static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	spin_lock(vrh->iotlb_lock);

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	spin_unlock(vrh->iotlb_lock);

	return ret;
}

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - attach an IOTLB (and its lock) to a vring.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 * @iotlb_lock: spinlock to synchronize the iotlb accesses
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
		      spinlock_t *iotlb_lock)
{
	vrh->iotlb = iotlb;
	vrh->iotlb_lock = iotlb_lock;
}
EXPORT_SYMBOL(vringh_set_iotlb);
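
/*
 * Usage sketch (illustrative, not from the original file): a vDPA-style
 * device would initialize the ring and then attach its IOTLB before any
 * descriptor access, so that every address gets translated.  "dev" and
 * its fields are hypothetical.
 *
 *	vringh_init_iotlb(&dev->vrh, features, num, false,
 *			  dev->desc, dev->avail, dev->used);
 *	vringh_set_iotlb(&dev->vrh, dev->iotlb, &dev->iotlb_lock);
 */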

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with
 * IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 * When you don't need riov and wiov anymore, you should clean them up by
 * calling vringh_kiov_cleanup() to release the memory, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);
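
/*
 * Usage sketch (illustrative, not from the original file): unlike the
 * _kern variants, the IOTLB pull/push helpers need the vringh itself so
 * that each copy can be translated.  "vrh" and "req" are hypothetical.
 *
 *	struct vringh_kiov riov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	if (vringh_getdesc_iotlb(&vrh, &riov, NULL, &head, GFP_ATOMIC) == 1) {
 *		vringh_iov_pull_iotlb(&vrh, &riov, &req, sizeof(req));
 *		vringh_complete_iotlb(&vrh, head, 0);
 *	}
 *	vringh_kiov_cleanup(&riov);
 */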

/**
 * vringh_iov_pull_iotlb - copy bytes from vringh_kiov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vringh_kiov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_iotlb() to undo).
 *
 * The next vringh_get_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);

#endif

MODULE_LICENSE("GPL");