// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
					      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};
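
/*
 * Each vring_desc_extra entry shadows one ring descriptor: it keeps the
 * DMA address, length and flags needed for unmapping, plus the free-list
 * "next" link, in driver-private memory, so that teardown never has to
 * trust values read back from the device-visible ring.
 */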

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
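	/*
	 * Skipping indirect when the free list is empty avoids a pointless
	 * table allocation: even an indirect add consumes one ring
	 * descriptor to point at the table, so it would fail with
	 * -ENOSPC anyway.
	 */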
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
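
/*
 * virtio_has_dma_quirk() keys off VIRTIO_F_ACCESS_PLATFORM (formerly
 * VIRTIO_F_IOMMU_PLATFORM): a device offering that feature promises to
 * honour the platform's DMA constraints, so the DMA API is used
 * unconditionally; only legacy, quirky devices reach the guesswork above.
 */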

size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
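
/*
 * Callers such as virtio-blk use this to clamp per-segment request
 * sizes, so that a swiotlb-bounced mapping can never exceed
 * dma_max_mapping_size() of the transport device.
 */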

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

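/*
 * Write one descriptor and advance: fill desc[i] in guest-endian form
 * and, for ring-resident (non-indirect) descriptors, mirror the
 * addr/len/flags into desc_extra so completion never has to read the
 * ring back. Returns the index of the next descriptor in the chain.
 */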
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

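/*
 * Core split-ring add path: chain out_sgs device-readable buffers
 * followed by in_sgs device-writable ones (directly in the ring or in
 * an indirect table), then publish the head in the avail ring. The
 * avail->idx update is ordered after the descriptor writes by
 * virtio_wmb().
 */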
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

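/*
 * With EVENT_IDX negotiated, the device publishes the avail index it
 * wants to be kicked at. vring_need_event(event_idx, new, old) expands
 * to (u16)(new - event_idx - 1) < (u16)(new - old): did the range of
 * entries we just published cross the device's wakeup point?
 */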
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

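/*
 * Return a chain to the free list: walk the VRING_DESC_F_NEXT links
 * from head, unmapping each descriptor, splice the chain onto
 * free_head, and free the indirect table (or hand back the caller's
 * ctx pointer).
 */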
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

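/*
 * "Delayed" callback enable: rather than requesting an interrupt on the
 * very next used entry, point used_event about 3/4 of the outstanding
 * buffers ahead, trading completion latency for fewer interrupts. The
 * final index re-check catches completions that raced with the update.
 */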
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

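/*
 * Allocation policy: while the ring spans more than a page, try
 * allocating it contiguously, halving num on failure when
 * may_reduce_num allows; once the ring fits in a page, make one final
 * attempt before giving up.
 */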
static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}


/*
 * Packed ring specific functions - *_packed().
 */

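/*
 * In a packed ring there is a single descriptor array shared by driver
 * and device. Ownership of an entry is signalled by the AVAIL and USED
 * bits in desc->flags, interpreted against a wrap counter that each
 * side flips whenever it wraps past the end of the ring.
 */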
static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
				vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;
	int err;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
						    in_sgs, data, gfp);
		if (err != -ENOMEM) {
			END_USE(vq);
			return err;
		}

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_extra[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i < head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;
	curr = vq->free_head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_state_packed(vq,
					 &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

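	/*
	 * Read off_wrap and flags as a single 32-bit access so the pair
	 * stays consistent even while the device is updating its event
	 * suppression area.
	 */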
	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	vq->packed.desc_extra[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_state_packed(vq,
				&vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_extra[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		*ctx = state->indir_desc;
	}
}

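/*
 * A packed-ring entry is used when its AVAIL and USED flag bits agree
 * with each other and with the wrap counter we are polling against:
 * the device marks an entry used by writing both bits equal to the
 * driver's wrap counter at the time the entry was made available.
 */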
1413static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1414 u16 idx, bool used_wrap_counter)
1415{
1416 bool avail, used;
1417 u16 flags;
1418
1419 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1420 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1421 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1422
1423 return avail == used && used == used_wrap_counter;
1424}
1425
1426static inline bool more_used_packed(const struct vring_virtqueue *vq)
1427{
1428 return is_used_desc_packed(vq, vq->last_used_idx,
1429 vq->packed.used_wrap_counter);
1430}
1431
1432static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1433 unsigned int *len,
1434 void **ctx)
1435{
1436 struct vring_virtqueue *vq = to_vvq(_vq);
1437 u16 last_used, id;
1438 void *ret;
1439
1440 START_USE(vq);
1441
1442 if (unlikely(vq->broken)) {
1443 END_USE(vq);
1444 return NULL;
1445 }
1446
1447 if (!more_used_packed(vq)) {
1448 pr_debug("No more buffers in queue\n");
1449 END_USE(vq);
1450 return NULL;
1451 }
1452
1453 /* Only get used elements after they have been exposed by host. */
1454 virtio_rmb(vq->weak_barriers);
1455
1456 last_used = vq->last_used_idx;
1457 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1458 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1459
1460 if (unlikely(id >= vq->packed.vring.num)) {
1461 BAD_RING(vq, "id %u out of range\n", id);
1462 return NULL;
1463 }
1464 if (unlikely(!vq->packed.desc_state[id].data)) {
1465 BAD_RING(vq, "id %u is not a head!\n", id);
1466 return NULL;
1467 }
1468
1469 /* detach_buf_packed clears data, so grab it now. */
1470 ret = vq->packed.desc_state[id].data;
1471 detach_buf_packed(vq, id, ctx);
1472
1473 vq->last_used_idx += vq->packed.desc_state[id].num;
1474 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1475 vq->last_used_idx -= vq->packed.vring.num;
1476 vq->packed.used_wrap_counter ^= 1;
1477 }
1478
Tiwei Bief51f9822018-11-21 18:03:28 +08001479 /*
1480	 * If we expect an interrupt for the next entry, tell the host
1481	 * by writing the event index, and flush out the write before
1482	 * the read in the next get_buf call.
1483 */
1484 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1485 virtio_store_mb(vq->weak_barriers,
1486 &vq->packed.vring.driver->off_wrap,
1487 cpu_to_le16(vq->last_used_idx |
1488 (vq->packed.used_wrap_counter <<
1489 VRING_PACKED_EVENT_F_WRAP_CTR)));
1490
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001491 LAST_ADD_TIME_INVALID(vq);
1492
1493 END_USE(vq);
1494 return ret;
1495}
1496
1497static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1498{
1499 struct vring_virtqueue *vq = to_vvq(_vq);
1500
1501 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1502 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1503 vq->packed.vring.driver->flags =
1504 cpu_to_le16(vq->packed.event_flags_shadow);
1505 }
1506}
1507
1508static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1509{
1510 struct vring_virtqueue *vq = to_vvq(_vq);
1511
1512 START_USE(vq);
1513
1514 /*
1515 * We optimistically turn back on interrupts, then check if there was
1516 * more to do.
1517 */
1518
Tiwei Bief51f9822018-11-21 18:03:28 +08001519 if (vq->event) {
1520 vq->packed.vring.driver->off_wrap =
1521 cpu_to_le16(vq->last_used_idx |
1522 (vq->packed.used_wrap_counter <<
1523 VRING_PACKED_EVENT_F_WRAP_CTR));
1524 /*
1525	 * We need to update the event offset and event wrap
1526	 * counter before updating the event flags.
1527 */
1528 virtio_wmb(vq->weak_barriers);
1529 }
1530
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001531 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
Tiwei Bief51f9822018-11-21 18:03:28 +08001532 vq->packed.event_flags_shadow = vq->event ?
1533 VRING_PACKED_EVENT_FLAG_DESC :
1534 VRING_PACKED_EVENT_FLAG_ENABLE;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001535 vq->packed.vring.driver->flags =
1536 cpu_to_le16(vq->packed.event_flags_shadow);
1537 }
1538
1539 END_USE(vq);
1540 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1541 VRING_PACKED_EVENT_F_WRAP_CTR);
1542}
1543
1544static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1545{
1546 struct vring_virtqueue *vq = to_vvq(_vq);
1547 bool wrap_counter;
1548 u16 used_idx;
1549
1550 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1551 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1552
1553 return is_used_desc_packed(vq, used_idx, wrap_counter);
1554}
1555
1556static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1557{
1558 struct vring_virtqueue *vq = to_vvq(_vq);
1559 u16 used_idx, wrap_counter;
Tiwei Bief51f9822018-11-21 18:03:28 +08001560 u16 bufs;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001561
1562 START_USE(vq);
1563
1564 /*
1565 * We optimistically turn back on interrupts, then check if there was
1566 * more to do.
1567 */
1568
Tiwei Bief51f9822018-11-21 18:03:28 +08001569 if (vq->event) {
1570 /* TODO: tune this threshold */
1571 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1572 wrap_counter = vq->packed.used_wrap_counter;
1573
1574 used_idx = vq->last_used_idx + bufs;
1575 if (used_idx >= vq->packed.vring.num) {
1576 used_idx -= vq->packed.vring.num;
1577 wrap_counter ^= 1;
1578 }
1579
1580 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1581 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1582
1583 /*
1584	 * We need to update the event offset and event wrap
1585	 * counter before updating the event flags.
1586 */
1587 virtio_wmb(vq->weak_barriers);
Tiwei Bief51f9822018-11-21 18:03:28 +08001588 }
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001589
1590 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
Tiwei Bief51f9822018-11-21 18:03:28 +08001591 vq->packed.event_flags_shadow = vq->event ?
1592 VRING_PACKED_EVENT_FLAG_DESC :
1593 VRING_PACKED_EVENT_FLAG_ENABLE;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001594 vq->packed.vring.driver->flags =
1595 cpu_to_le16(vq->packed.event_flags_shadow);
1596 }
1597
1598 /*
1599	 * We need to update the event suppression structure
1600	 * before re-checking for more used buffers.
1601 */
1602 virtio_mb(vq->weak_barriers);
1603
Marvin Liu40ce7912019-10-22 01:10:04 +08001604 if (is_used_desc_packed(vq,
1605 vq->last_used_idx,
1606 vq->packed.used_wrap_counter)) {
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001607 END_USE(vq);
1608 return false;
1609 }
1610
1611 END_USE(vq);
1612 return true;
1613}
1614
1615static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1616{
1617 struct vring_virtqueue *vq = to_vvq(_vq);
1618 unsigned int i;
1619 void *buf;
1620
1621 START_USE(vq);
1622
1623 for (i = 0; i < vq->packed.vring.num; i++) {
1624 if (!vq->packed.desc_state[i].data)
1625 continue;
1626 /* detach_buf clears data, so grab it now. */
1627 buf = vq->packed.desc_state[i].data;
1628 detach_buf_packed(vq, i, NULL);
1629 END_USE(vq);
1630 return buf;
1631 }
1632 /* That should have freed everything. */
1633 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1634
1635 END_USE(vq);
1636 return NULL;
1637}
1638
Jason Wang5a222422021-06-04 13:53:46 +08001639static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
1640 unsigned int num)
1641{
1642 struct vring_desc_extra *desc_extra;
1643 unsigned int i;
1644
1645 desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
1646 GFP_KERNEL);
1647 if (!desc_extra)
1648 return NULL;
1649
1650 memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
1651
1652 for (i = 0; i < num - 1; i++)
1653 desc_extra[i].next = i + 1;
1654
1655 return desc_extra;
1656}
1657
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001658static struct virtqueue *vring_create_virtqueue_packed(
1659 unsigned int index,
1660 unsigned int num,
1661 unsigned int vring_align,
1662 struct virtio_device *vdev,
1663 bool weak_barriers,
1664 bool may_reduce_num,
1665 bool context,
1666 bool (*notify)(struct virtqueue *),
1667 void (*callback)(struct virtqueue *),
1668 const char *name)
1669{
1670 struct vring_virtqueue *vq;
1671 struct vring_packed_desc *ring;
1672 struct vring_packed_desc_event *driver, *device;
1673 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1674 size_t ring_size_in_bytes, event_size_in_bytes;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001675
1676 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1677
1678 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1679 &ring_dma_addr,
1680 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1681 if (!ring)
1682 goto err_ring;
1683
1684 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1685
1686 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1687 &driver_event_dma_addr,
1688 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1689 if (!driver)
1690 goto err_driver;
1691
1692 device = vring_alloc_queue(vdev, event_size_in_bytes,
1693 &device_event_dma_addr,
1694 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1695 if (!device)
1696 goto err_device;
1697
1698 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1699 if (!vq)
1700 goto err_vq;
1701
1702 vq->vq.callback = callback;
1703 vq->vq.vdev = vdev;
1704 vq->vq.name = name;
1705 vq->vq.num_free = num;
1706 vq->vq.index = index;
1707 vq->we_own_ring = true;
1708 vq->notify = notify;
1709 vq->weak_barriers = weak_barriers;
1710 vq->broken = false;
1711 vq->last_used_idx = 0;
Michael S. Tsirkin8d622d22021-04-13 01:19:16 -04001712 vq->event_triggered = false;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001713 vq->num_added = 0;
1714 vq->packed_ring = true;
1715 vq->use_dma_api = vring_use_dma_api(vdev);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001716#ifdef DEBUG
1717 vq->in_use = false;
1718 vq->last_add_time_valid = false;
1719#endif
1720
1721 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1722 !context;
1723 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1724
Tiwei Bie45383fb2019-01-23 17:50:26 +08001725 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1726 vq->weak_barriers = false;
1727
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001728 vq->packed.ring_dma_addr = ring_dma_addr;
1729 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1730 vq->packed.device_event_dma_addr = device_event_dma_addr;
1731
1732 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1733 vq->packed.event_size_in_bytes = event_size_in_bytes;
1734
1735 vq->packed.vring.num = num;
1736 vq->packed.vring.desc = ring;
1737 vq->packed.vring.driver = driver;
1738 vq->packed.vring.device = device;
1739
1740 vq->packed.next_avail_idx = 0;
1741 vq->packed.avail_wrap_counter = 1;
1742 vq->packed.used_wrap_counter = 1;
1743 vq->packed.event_flags_shadow = 0;
1744 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1745
1746 vq->packed.desc_state = kmalloc_array(num,
1747 sizeof(struct vring_desc_state_packed),
1748 GFP_KERNEL);
1749 if (!vq->packed.desc_state)
1750 goto err_desc_state;
1751
1752 memset(vq->packed.desc_state, 0,
1753 num * sizeof(struct vring_desc_state_packed));
1754
1755 /* Put everything in free lists. */
1756 vq->free_head = 0;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001757
Jason Wang5a222422021-06-04 13:53:46 +08001758 vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001759 if (!vq->packed.desc_extra)
1760 goto err_desc_extra;
1761
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001762 /* No callback? Tell other side not to bother us. */
1763 if (!callback) {
1764 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1765 vq->packed.vring.driver->flags =
1766 cpu_to_le16(vq->packed.event_flags_shadow);
1767 }
1768
Parav Pandit0e566c82021-07-21 17:26:47 +03001769 spin_lock(&vdev->vqs_list_lock);
Dan Carpentere152d8a2020-12-04 17:23:36 +03001770 list_add_tail(&vq->vq.list, &vdev->vqs);
Parav Pandit0e566c82021-07-21 17:26:47 +03001771 spin_unlock(&vdev->vqs_list_lock);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001772 return &vq->vq;
1773
1774err_desc_extra:
1775 kfree(vq->packed.desc_state);
1776err_desc_state:
1777 kfree(vq);
1778err_vq:
Dan Carpenterae93d8e2020-12-04 17:23:00 +03001779 vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001780err_device:
Dan Carpenterae93d8e2020-12-04 17:23:00 +03001781 vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001782err_driver:
1783 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1784err_ring:
1785 return NULL;
1786}
1787
1789/*
Tiwei Biee6f633e2018-11-21 18:03:20 +08001790 * Generic functions and exported symbols.
1791 */
1792
1793static inline int virtqueue_add(struct virtqueue *_vq,
1794 struct scatterlist *sgs[],
1795 unsigned int total_sg,
1796 unsigned int out_sgs,
1797 unsigned int in_sgs,
1798 void *data,
1799 void *ctx,
1800 gfp_t gfp)
1801{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001802 struct vring_virtqueue *vq = to_vvq(_vq);
1803
1804 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1805 out_sgs, in_sgs, data, ctx, gfp) :
1806 virtqueue_add_split(_vq, sgs, total_sg,
1807 out_sgs, in_sgs, data, ctx, gfp);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001808}
1809
1810/**
1811 * virtqueue_add_sgs - expose buffers to other end
Jiang Biaoa5581202019-04-23 18:25:12 +08001812 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08001813 * @sgs: array of terminated scatterlists.
Jiang Biaoa5581202019-04-23 18:25:12 +08001814 * @out_sgs: the number of scatterlists readable by other side
1815 * @in_sgs: the number of scatterlists which are writable (after readable ones)
Tiwei Biee6f633e2018-11-21 18:03:20 +08001816 * @data: the token identifying the buffer.
1817 * @gfp: how to do memory allocations (if necessary).
1818 *
1819 * Caller must ensure we don't call this with other virtqueue operations
1820 * at the same time (except where noted).
1821 *
1822 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1823 */
1824int virtqueue_add_sgs(struct virtqueue *_vq,
1825 struct scatterlist *sgs[],
1826 unsigned int out_sgs,
1827 unsigned int in_sgs,
1828 void *data,
1829 gfp_t gfp)
1830{
1831 unsigned int i, total_sg = 0;
1832
1833 /* Count them first. */
1834 for (i = 0; i < out_sgs + in_sgs; i++) {
1835 struct scatterlist *sg;
1836
1837 for (sg = sgs[i]; sg; sg = sg_next(sg))
1838 total_sg++;
1839 }
1840 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1841 data, NULL, gfp);
1842}
1843EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
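
/*
 * Usage sketch (hypothetical driver code; struct foo_req and its fields
 * are assumed, not part of this file): queue one readable and one
 * writable scatterlist for a request, in the style of virtio-blk:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (err)
 *		return err;
 *
 * A failure here means nothing was queued: -ENOSPC when the ring is
 * full, -EIO when the queue is broken.
 */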
1844
1845/**
1846 * virtqueue_add_outbuf - expose output buffers to other end
1847 * @vq: the struct virtqueue we're talking about.
1848 * @sg: scatterlist (must be well-formed and terminated!)
1849 * @num: the number of entries in @sg readable by other side
1850 * @data: the token identifying the buffer.
1851 * @gfp: how to do memory allocations (if necessary).
1852 *
1853 * Caller must ensure we don't call this with other virtqueue operations
1854 * at the same time (except where noted).
1855 *
1856 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1857 */
1858int virtqueue_add_outbuf(struct virtqueue *vq,
1859 struct scatterlist *sg, unsigned int num,
1860 void *data,
1861 gfp_t gfp)
1862{
1863 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1864}
1865EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
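
/*
 * Usage sketch (hypothetical; "buf" and "len" come from the caller):
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 *
 * On failure the buffer was not exposed and remains owned by the caller.
 */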
1866
1867/**
1868 * virtqueue_add_inbuf - expose input buffers to other end
1869 * @vq: the struct virtqueue we're talking about.
1870 * @sg: scatterlist (must be well-formed and terminated!)
1871 * @num: the number of entries in @sg writable by other side
1872 * @data: the token identifying the buffer.
1873 * @gfp: how to do memory allocations (if necessary).
1874 *
1875 * Caller must ensure we don't call this with other virtqueue operations
1876 * at the same time (except where noted).
1877 *
1878 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1879 */
1880int virtqueue_add_inbuf(struct virtqueue *vq,
1881 struct scatterlist *sg, unsigned int num,
1882 void *data,
1883 gfp_t gfp)
1884{
1885 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1886}
1887EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
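
/*
 * Usage sketch (hypothetical; alloc_rx_buf(), free_rx_buf() and
 * RX_BUF_LEN are assumed helpers): keep the device topped up with
 * receive buffers until the ring fills up:
 *
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	while ((buf = alloc_rx_buf(GFP_KERNEL)) != NULL) {
 *		sg_init_one(&sg, buf, RX_BUF_LEN);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL)) {
 *			free_rx_buf(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */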
1888
1889/**
1890 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1891 * @vq: the struct virtqueue we're talking about.
1892 * @sg: scatterlist (must be well-formed and terminated!)
1893 * @num: the number of entries in @sg writable by other side
1894 * @data: the token identifying the buffer.
1895 * @ctx: extra context for the token
1896 * @gfp: how to do memory allocations (if necessary).
1897 *
1898 * Caller must ensure we don't call this with other virtqueue operations
1899 * at the same time (except where noted).
1900 *
1901 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1902 */
1903int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1904 struct scatterlist *sg, unsigned int num,
1905 void *data,
1906 void *ctx,
1907 gfp_t gfp)
1908{
1909 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1910}
1911EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
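
/*
 * Editorial note: the @ctx cookie is handed back by
 * virtqueue_get_buf_ctx() together with the @data token, letting a
 * driver stash per-buffer metadata without a side table; virtio-net,
 * for instance, uses it for its mergeable receive buffers.
 * Hypothetical sketch:
 *
 *	err = virtqueue_add_inbuf_ctx(vq, &sg, 1, buf,
 *				      (void *)(unsigned long)truesize,
 *				      GFP_ATOMIC);
 */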
1912
1913/**
1914 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
Jiang Biaoa5581202019-04-23 18:25:12 +08001915 * @_vq: the struct virtqueue
Tiwei Biee6f633e2018-11-21 18:03:20 +08001916 *
1917 * Instead of virtqueue_kick(), you can do:
1918 * if (virtqueue_kick_prepare(vq))
1919 * virtqueue_notify(vq);
1920 *
1921 * This is sometimes useful because virtqueue_kick_prepare() needs
1922 * to be serialized, but the actual virtqueue_notify() call does not.
1923 */
1924bool virtqueue_kick_prepare(struct virtqueue *_vq)
1925{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001926 struct vring_virtqueue *vq = to_vvq(_vq);
1927
1928 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1929 virtqueue_kick_prepare_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08001930}
1931EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
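
/*
 * Sketch of the split kick under a driver lock (hypothetical; priv, sg
 * and token are assumed).  The notification, which may trap into the
 * hypervisor, deliberately happens outside the lock:
 *
 *	unsigned long flags;
 *	bool notify;
 *	int err;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	notify = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (notify)
 *		virtqueue_notify(vq);
 */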
1932
1933/**
1934 * virtqueue_notify - second half of split virtqueue_kick call.
Jiang Biaoa5581202019-04-23 18:25:12 +08001935 * @_vq: the struct virtqueue
Tiwei Biee6f633e2018-11-21 18:03:20 +08001936 *
1937 * This does not need to be serialized.
1938 *
1939 * Returns false if host notify failed or queue is broken, otherwise true.
1940 */
1941bool virtqueue_notify(struct virtqueue *_vq)
1942{
1943 struct vring_virtqueue *vq = to_vvq(_vq);
1944
1945 if (unlikely(vq->broken))
1946 return false;
1947
1948 /* Prod other side to tell it about changes. */
1949 if (!vq->notify(_vq)) {
1950 vq->broken = true;
1951 return false;
1952 }
1953 return true;
1954}
1955EXPORT_SYMBOL_GPL(virtqueue_notify);
1956
1957/**
1958 * virtqueue_kick - update after add_buf
1959 * @vq: the struct virtqueue
1960 *
1961 * After one or more virtqueue_add_* calls, invoke this to kick
1962 * the other side.
1963 *
1964 * Caller must ensure we don't call this with other virtqueue
1965 * operations at the same time (except where noted).
1966 *
1967 * Returns false if kick failed, otherwise true.
1968 */
1969bool virtqueue_kick(struct virtqueue *vq)
1970{
1971 if (virtqueue_kick_prepare(vq))
1972 return virtqueue_notify(vq);
1973 return true;
1974}
1975EXPORT_SYMBOL_GPL(virtqueue_kick);
1976
1977/**
Yang Li31c11db2021-05-26 11:12:11 +08001978 * virtqueue_get_buf_ctx - get the next used buffer
Jiang Biaoa5581202019-04-23 18:25:12 +08001979 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08001980 * @len: the length written into the buffer
Jiang Biaoa5581202019-04-23 18:25:12 +08001981 * @ctx: extra context for the token
Tiwei Biee6f633e2018-11-21 18:03:20 +08001982 *
1983 * If the device wrote data into the buffer, @len will be set to the
1984 * amount written. This means you don't need to clear the buffer
1985 * beforehand to ensure there's no data leakage in the case of short
1986 * writes.
1987 *
1988 * Caller must ensure we don't call this with other virtqueue
1989 * operations at the same time (except where noted).
1990 *
1991 * Returns NULL if there are no used buffers, or the "data" token
1992 * handed to virtqueue_add_*().
1993 */
1994void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1995 void **ctx)
1996{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08001997 struct vring_virtqueue *vq = to_vvq(_vq);
1998
1999 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
2000 virtqueue_get_buf_ctx_split(_vq, len, ctx);
Tiwei Biee6f633e2018-11-21 18:03:20 +08002001}
2002EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2003
2004void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2005{
2006 return virtqueue_get_buf_ctx(_vq, len, NULL);
2007}
2008EXPORT_SYMBOL_GPL(virtqueue_get_buf);
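
/*
 * Usage sketch (hypothetical; complete_request() is an assumed helper):
 * drain all completions, typically from the virtqueue callback:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */
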
Tiwei Biee6f633e2018-11-21 18:03:20 +08002009/**
2010 * virtqueue_disable_cb - disable callbacks
Jiang Biaoa5581202019-04-23 18:25:12 +08002011 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08002012 *
2013 * Note that this is not necessarily synchronous, hence unreliable and only
2014 * useful as an optimization.
2015 *
2016 * Unlike other operations, this need not be serialized.
2017 */
2018void virtqueue_disable_cb(struct virtqueue *_vq)
2019{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002020 struct vring_virtqueue *vq = to_vvq(_vq);
2021
Michael S. Tsirkin8d622d22021-04-13 01:19:16 -04002022	/* If the device has already triggered an event, it won't trigger one again:
2023 * no need to disable.
2024 */
2025 if (vq->event_triggered)
2026 return;
2027
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002028 if (vq->packed_ring)
2029 virtqueue_disable_cb_packed(_vq);
2030 else
2031 virtqueue_disable_cb_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08002032}
2033EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2034
2035/**
2036 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
Jiang Biaoa5581202019-04-23 18:25:12 +08002037 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08002038 *
2039 * This re-enables callbacks; it returns the current queue state
2040 * in an opaque unsigned value. This value should later be tested by
2041 * virtqueue_poll, to detect a possible race between the driver checking for
2042 * more work, and enabling callbacks.
2043 *
2044 * Caller must ensure we don't call this with other virtqueue
2045 * operations at the same time (except where noted).
2046 */
2047unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2048{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002049 struct vring_virtqueue *vq = to_vvq(_vq);
2050
Michael S. Tsirkin8d622d22021-04-13 01:19:16 -04002051 if (vq->event_triggered)
2052 vq->event_triggered = false;
2053
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002054 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
2055 virtqueue_enable_cb_prepare_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08002056}
2057EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2058
2059/**
2060 * virtqueue_poll - query pending used buffers
Jiang Biaoa5581202019-04-23 18:25:12 +08002061 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08002062 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2063 *
2064 * Returns "true" if there are pending used buffers in the queue.
2065 *
2066 * This does not need to be serialized.
2067 */
2068bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
2069{
2070 struct vring_virtqueue *vq = to_vvq(_vq);
2071
Mao Wenan481a0d72020-08-02 15:44:09 +08002072 if (unlikely(vq->broken))
2073 return false;
2074
Tiwei Biee6f633e2018-11-21 18:03:20 +08002075 virtio_mb(vq->weak_barriers);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002076 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
2077 virtqueue_poll_split(_vq, last_used_idx);
Tiwei Biee6f633e2018-11-21 18:03:20 +08002078}
2079EXPORT_SYMBOL_GPL(virtqueue_poll);
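
/*
 * Editorial sketch of the race-free re-enable idiom these two helpers
 * support (hypothetical driver code):
 *
 *	unsigned opaque;
 *
 *again:
 *	... process used buffers ...
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (unlikely(virtqueue_poll(vq, opaque))) {
 *		virtqueue_disable_cb(vq);
 *		goto again;
 *	}
 *
 * If a buffer lands between the driver's last check and the re-enable,
 * virtqueue_poll() notices it instead of the event being lost.
 */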
2080
2081/**
2082 * virtqueue_enable_cb - restart callbacks after disable_cb.
Jiang Biaoa5581202019-04-23 18:25:12 +08002083 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08002084 *
2085 * This re-enables callbacks; it returns "false" if there are pending
2086 * buffers in the queue, to detect a possible race between the driver
2087 * checking for more work, and enabling callbacks.
2088 *
2089 * Caller must ensure we don't call this with other virtqueue
2090 * operations at the same time (except where noted).
2091 */
2092bool virtqueue_enable_cb(struct virtqueue *_vq)
2093{
2094 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
2095
2096 return !virtqueue_poll(_vq, last_used_idx);
2097}
2098EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
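
/*
 * Canonical processing loop built on disable_cb/get_buf/enable_cb
 * (hypothetical; process() is an assumed helper):
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			process(buf, len);
 *		if (virtqueue_enable_cb(vq))
 *			break;
 *	}
 */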
2099
2100/**
2101 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
Jiang Biaoa5581202019-04-23 18:25:12 +08002102 * @_vq: the struct virtqueue we're talking about.
Tiwei Biee6f633e2018-11-21 18:03:20 +08002103 *
2104 * This re-enables callbacks but hints to the other side to delay
2105 * interrupts until most of the available buffers have been processed;
2106 * it returns "false" if there are many pending buffers in the queue,
2107 * to detect a possible race between the driver checking for more work,
2108 * and enabling callbacks.
2109 *
2110 * Caller must ensure we don't call this with other virtqueue
2111 * operations at the same time (except where noted).
2112 */
2113bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2114{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002115 struct vring_virtqueue *vq = to_vvq(_vq);
2116
Michael S. Tsirkin8d622d22021-04-13 01:19:16 -04002117 if (vq->event_triggered)
2118 vq->event_triggered = false;
2119
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002120 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
2121 virtqueue_enable_cb_delayed_split(_vq);
Tiwei Biee6f633e2018-11-21 18:03:20 +08002122}
2123EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
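
/*
 * Typically used on transmit queues to batch completion interrupts
 * (hypothetical; reclaim_tx() is an assumed helper):
 *
 *	if (!virtqueue_enable_cb_delayed(txvq))
 *		reclaim_tx(txvq);
 *
 * A "false" return means many buffers are already pending, so the
 * driver reaps them immediately rather than waiting for an interrupt.
 */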
2124
Tiwei Bie138fd252018-11-21 18:03:19 +08002125/**
2126 * virtqueue_detach_unused_buf - detach first unused buffer
Jiang Biaoa5581202019-04-23 18:25:12 +08002127 * @_vq: the struct virtqueue we're talking about.
Tiwei Bie138fd252018-11-21 18:03:19 +08002128 *
2129 * Returns NULL or the "data" token handed to virtqueue_add_*().
2130 * This is not valid on an active queue; it is useful only for device
2131 * shutdown.
2132 */
2133void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2134{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002135 struct vring_virtqueue *vq = to_vvq(_vq);
2136
2137 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2138 virtqueue_detach_unused_buf_split(_vq);
Tiwei Bie138fd252018-11-21 18:03:19 +08002139}
Michael S. Tsirkin7c5e9ed2010-04-12 16:19:07 +03002140EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
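
/*
 * Shutdown sketch (hypothetical): after resetting the device, reclaim
 * buffers that were posted but never consumed:
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */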
Shirley Mac021eac2010-01-18 19:15:23 +05302141
Tiwei Bie138fd252018-11-21 18:03:19 +08002142static inline bool more_used(const struct vring_virtqueue *vq)
2143{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002144 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
Tiwei Bie138fd252018-11-21 18:03:19 +08002145}
2146
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002147irqreturn_t vring_interrupt(int irq, void *_vq)
2148{
2149 struct vring_virtqueue *vq = to_vvq(_vq);
2150
2151 if (!more_used(vq)) {
2152 pr_debug("virtqueue interrupt with no work for %p\n", vq);
2153 return IRQ_NONE;
2154 }
2155
2156 if (unlikely(vq->broken))
2157 return IRQ_HANDLED;
2158
Michael S. Tsirkin8d622d22021-04-13 01:19:16 -04002159 /* Just a hint for performance: so it's ok that this can be racy! */
2160 if (vq->event)
2161 vq->event_triggered = true;
2162
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002163 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
Rusty Russell18445c42008-02-04 23:49:57 -05002164 if (vq->vq.callback)
2165 vq->vq.callback(&vq->vq);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002166
2167 return IRQ_HANDLED;
2168}
Rusty Russellc6fd4702008-02-04 23:50:05 -05002169EXPORT_SYMBOL_GPL(vring_interrupt);
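
/*
 * Transports wire this up as the per-virtqueue interrupt handler, along
 * the lines of (hypothetical, modelled on virtio-pci's per-vq vectors):
 *
 *	err = request_irq(irq, vring_interrupt, 0, name, vq);
 */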
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002170
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002171/* Only available for split ring */
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002172struct virtqueue *__vring_new_virtqueue(unsigned int index,
2173 struct vring vring,
2174 struct virtio_device *vdev,
2175 bool weak_barriers,
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002176 bool context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002177 bool (*notify)(struct virtqueue *),
2178 void (*callback)(struct virtqueue *),
2179 const char *name)
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002180{
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002181 struct vring_virtqueue *vq;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002182
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002183 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2184 return NULL;
2185
Tiwei Biecbeedb72018-11-21 18:03:24 +08002186 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002187 if (!vq)
2188 return NULL;
2189
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002190 vq->packed_ring = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002191 vq->vq.callback = callback;
2192 vq->vq.vdev = vdev;
Rusty Russell9499f5e2009-06-12 22:16:35 -06002193 vq->vq.name = name;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002194 vq->vq.num_free = vring.num;
Rusty Russell06ca2872012-10-16 23:56:14 +10302195 vq->vq.index = index;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002196 vq->we_own_ring = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002197 vq->notify = notify;
Rusty Russell7b21e342012-01-12 15:44:42 +10302198 vq->weak_barriers = weak_barriers;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002199 vq->broken = false;
2200 vq->last_used_idx = 0;
Michael S. Tsirkin8d622d22021-04-13 01:19:16 -04002201 vq->event_triggered = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002202 vq->num_added = 0;
Tiwei Biefb3fba62018-11-21 18:03:26 +08002203 vq->use_dma_api = vring_use_dma_api(vdev);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002204#ifdef DEBUG
2205 vq->in_use = false;
Rusty Russelle93300b2012-01-12 15:44:43 +10302206 vq->last_add_time_valid = false;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002207#endif
2208
Michael S. Tsirkin5a08b042017-02-07 06:15:13 +02002209 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2210 !context;
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +03002211 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +01002212
Tiwei Bie45383fb2019-01-23 17:50:26 +08002213 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2214 vq->weak_barriers = false;
2215
Tiwei Bied79dca72018-11-21 18:03:25 +08002216 vq->split.queue_dma_addr = 0;
2217 vq->split.queue_size_in_bytes = 0;
2218
Tiwei Biee593bf92018-11-21 18:03:21 +08002219 vq->split.vring = vring;
2220 vq->split.avail_flags_shadow = 0;
2221 vq->split.avail_idx_shadow = 0;
2222
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002223 /* No callback? Tell other side not to bother us. */
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -08002224 if (!callback) {
Tiwei Biee593bf92018-11-21 18:03:21 +08002225 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
Ladi Prosek0ea1e4a2016-08-31 14:00:04 +02002226 if (!vq->event)
Tiwei Biee593bf92018-11-21 18:03:21 +08002227 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2228 vq->split.avail_flags_shadow);
Venkatesh Srinivasf277ec42015-11-10 16:21:07 -08002229 }
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002230
Tiwei Biecbeedb72018-11-21 18:03:24 +08002231 vq->split.desc_state = kmalloc_array(vring.num,
2232 sizeof(struct vring_desc_state_split), GFP_KERNEL);
Jason Wang5bc72232021-06-04 13:53:49 +08002233 if (!vq->split.desc_state)
2234 goto err_state;
Tiwei Biecbeedb72018-11-21 18:03:24 +08002235
Jason Wang72b5e892021-06-04 13:53:50 +08002236 vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
2237 if (!vq->split.desc_extra)
2238 goto err_extra;
2239
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002240 /* Put everything in free lists. */
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002241 vq->free_head = 0;
Tiwei Biecbeedb72018-11-21 18:03:24 +08002242 memset(vq->split.desc_state, 0, vring.num *
2243 sizeof(struct vring_desc_state_split));
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002244
Parav Pandit0e566c82021-07-21 17:26:47 +03002245 spin_lock(&vdev->vqs_list_lock);
Dan Carpentere152d8a2020-12-04 17:23:36 +03002246 list_add_tail(&vq->vq.list, &vdev->vqs);
Parav Pandit0e566c82021-07-21 17:26:47 +03002247 spin_unlock(&vdev->vqs_list_lock);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002248 return &vq->vq;
Jason Wang5bc72232021-06-04 13:53:49 +08002249
Jason Wang72b5e892021-06-04 13:53:50 +08002250err_extra:
2251 kfree(vq->split.desc_state);
Jason Wang5bc72232021-06-04 13:53:49 +08002252err_state:
2253 kfree(vq);
2254 return NULL;
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002255}
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002256EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
2257
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002258struct virtqueue *vring_create_virtqueue(
2259 unsigned int index,
2260 unsigned int num,
2261 unsigned int vring_align,
2262 struct virtio_device *vdev,
2263 bool weak_barriers,
2264 bool may_reduce_num,
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002265 bool context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002266 bool (*notify)(struct virtqueue *),
2267 void (*callback)(struct virtqueue *),
2268 const char *name)
2269{
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002270
2271 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2272 return vring_create_virtqueue_packed(index, num, vring_align,
2273 vdev, weak_barriers, may_reduce_num,
2274 context, notify, callback, name);
2275
Tiwei Bied79dca72018-11-21 18:03:25 +08002276 return vring_create_virtqueue_split(index, num, vring_align,
2277 vdev, weak_barriers, may_reduce_num,
2278 context, notify, callback, name);
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002279}
2280EXPORT_SYMBOL_GPL(vring_create_virtqueue);
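
/*
 * A transport typically calls this from its find_vqs() implementation,
 * e.g. (hypothetical values; foo_notify() would be the transport's
 * doorbell write):
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, ctx,
 *				    foo_notify, callback, name);
 */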
2281
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002282/* Only available for split ring */
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002283struct virtqueue *vring_new_virtqueue(unsigned int index,
2284 unsigned int num,
2285 unsigned int vring_align,
2286 struct virtio_device *vdev,
2287 bool weak_barriers,
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002288 bool context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002289 void *pages,
2290 bool (*notify)(struct virtqueue *vq),
2291 void (*callback)(struct virtqueue *vq),
2292 const char *name)
2293{
2294 struct vring vring;
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002295
2296 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2297 return NULL;
2298
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002299 vring_init(&vring, num, pages, vring_align);
Michael S. Tsirkinf94682d2017-03-06 18:32:29 +02002300 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002301 notify, callback, name);
2302}
Rusty Russellc6fd4702008-02-04 23:50:05 -05002303EXPORT_SYMBOL_GPL(vring_new_virtqueue);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002304
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002305void vring_del_virtqueue(struct virtqueue *_vq)
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002306{
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002307 struct vring_virtqueue *vq = to_vvq(_vq);
2308
Parav Pandit0e566c82021-07-21 17:26:47 +03002309 spin_lock(&vq->vq.vdev->vqs_list_lock);
Parav Pandit249f2552021-07-21 17:26:46 +03002310 list_del(&_vq->list);
Parav Pandit0e566c82021-07-21 17:26:47 +03002311 spin_unlock(&vq->vq.vdev->vqs_list_lock);
Parav Pandit249f2552021-07-21 17:26:46 +03002312
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002313 if (vq->we_own_ring) {
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002314 if (vq->packed_ring) {
2315 vring_free_queue(vq->vq.vdev,
2316 vq->packed.ring_size_in_bytes,
2317 vq->packed.vring.desc,
2318 vq->packed.ring_dma_addr);
2319
2320 vring_free_queue(vq->vq.vdev,
2321 vq->packed.event_size_in_bytes,
2322 vq->packed.vring.driver,
2323 vq->packed.driver_event_dma_addr);
2324
2325 vring_free_queue(vq->vq.vdev,
2326 vq->packed.event_size_in_bytes,
2327 vq->packed.vring.device,
2328 vq->packed.device_event_dma_addr);
2329
2330 kfree(vq->packed.desc_state);
2331 kfree(vq->packed.desc_extra);
2332 } else {
2333 vring_free_queue(vq->vq.vdev,
2334 vq->split.queue_size_in_bytes,
2335 vq->split.vring.desc,
2336 vq->split.queue_dma_addr);
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002337 }
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002338 }
Jason Wang72b5e892021-06-04 13:53:50 +08002339 if (!vq->packed_ring) {
Suman Annaf13f09a2020-02-24 15:26:43 -06002340 kfree(vq->split.desc_state);
Jason Wang72b5e892021-06-04 13:53:50 +08002341 kfree(vq->split.desc_extra);
2342 }
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002343 kfree(vq);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002344}
Rusty Russellc6fd4702008-02-04 23:50:05 -05002345EXPORT_SYMBOL_GPL(vring_del_virtqueue);
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002346
Rusty Russelle34f8722008-07-25 12:06:13 -05002347/* Manipulates transport-specific feature bits. */
2348void vring_transport_features(struct virtio_device *vdev)
2349{
2350 unsigned int i;
2351
2352 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2353 switch (i) {
Mark McLoughlin9fa29b9d2009-05-11 18:11:45 +01002354 case VIRTIO_RING_F_INDIRECT_DESC:
2355 break;
Michael S. Tsirkina5c262c2011-05-20 02:10:44 +03002356 case VIRTIO_RING_F_EVENT_IDX:
2357 break;
Michael S. Tsirkin747ae342014-12-01 15:52:40 +02002358 case VIRTIO_F_VERSION_1:
2359 break;
Michael S. Tsirkin321bd212020-06-24 18:24:33 -04002360 case VIRTIO_F_ACCESS_PLATFORM:
Michael S. Tsirkin1a937692016-04-18 12:58:14 +03002361 break;
Tiwei Bief959a122018-11-21 18:03:30 +08002362 case VIRTIO_F_RING_PACKED:
2363 break;
Tiwei Bie45383fb2019-01-23 17:50:26 +08002364 case VIRTIO_F_ORDER_PLATFORM:
2365 break;
Rusty Russelle34f8722008-07-25 12:06:13 -05002366 default:
2367 /* We don't understand this bit. */
Michael S. Tsirkine16e12b2014-10-07 16:39:42 +02002368 __virtio_clear_bit(vdev, i);
Rusty Russelle34f8722008-07-25 12:06:13 -05002369 }
2370 }
2371}
2372EXPORT_SYMBOL_GPL(vring_transport_features);
2373
Rusty Russell5dfc1762012-01-12 15:44:42 +10302374/**
2375 * virtqueue_get_vring_size - return the size of the virtqueue's vring
Jiang Biaoa5581202019-04-23 18:25:12 +08002376 * @_vq: the struct virtqueue containing the vring of interest.
Rusty Russell5dfc1762012-01-12 15:44:42 +10302377 *
2378 * Returns the size of the vring. This is mainly used for boasting to
2379 * userspace. Unlike other operations, this need not be serialized.
2380 */
Rick Jones8f9f4662011-10-19 08:10:59 +00002381unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2382{
2384 struct vring_virtqueue *vq = to_vvq(_vq);
2385
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002386 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
Rick Jones8f9f4662011-10-19 08:10:59 +00002387}
2388EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2389
Heinz Graalfsb3b32c92013-10-29 09:40:19 +10302390bool virtqueue_is_broken(struct virtqueue *_vq)
2391{
2392 struct vring_virtqueue *vq = to_vvq(_vq);
2393
Parav Pandit60f07792021-07-21 17:26:45 +03002394 return READ_ONCE(vq->broken);
Heinz Graalfsb3b32c92013-10-29 09:40:19 +10302395}
2396EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2397
Rusty Russelle2dcdfe2014-04-28 11:15:08 +09302398/*
2399 * This should prevent the device from being used, allowing drivers to
2400 * recover. You may need to grab appropriate locks to flush.
2401 */
2402void virtio_break_device(struct virtio_device *dev)
2403{
2404 struct virtqueue *_vq;
2405
Parav Pandit0e566c82021-07-21 17:26:47 +03002406 spin_lock(&dev->vqs_list_lock);
Rusty Russelle2dcdfe2014-04-28 11:15:08 +09302407 list_for_each_entry(_vq, &dev->vqs, list) {
2408 struct vring_virtqueue *vq = to_vvq(_vq);
Parav Pandit60f07792021-07-21 17:26:45 +03002409
2410 /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2411 WRITE_ONCE(vq->broken, true);
Rusty Russelle2dcdfe2014-04-28 11:15:08 +09302412 }
Parav Pandit0e566c82021-07-21 17:26:47 +03002413 spin_unlock(&dev->vqs_list_lock);
Rusty Russelle2dcdfe2014-04-28 11:15:08 +09302414}
2415EXPORT_SYMBOL_GPL(virtio_break_device);
2416
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002417dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
Cornelia Huck89062652014-10-07 16:39:47 +02002418{
2419 struct vring_virtqueue *vq = to_vvq(_vq);
2420
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002421 BUG_ON(!vq->we_own_ring);
Cornelia Huck89062652014-10-07 16:39:47 +02002422
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002423 if (vq->packed_ring)
2424 return vq->packed.ring_dma_addr;
2425
Tiwei Bied79dca72018-11-21 18:03:25 +08002426 return vq->split.queue_dma_addr;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002427}
2428EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
2429
2430dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
Cornelia Huck89062652014-10-07 16:39:47 +02002431{
2432 struct vring_virtqueue *vq = to_vvq(_vq);
2433
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002434 BUG_ON(!vq->we_own_ring);
2435
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002436 if (vq->packed_ring)
2437 return vq->packed.driver_event_dma_addr;
2438
Tiwei Bied79dca72018-11-21 18:03:25 +08002439 return vq->split.queue_dma_addr +
Tiwei Biee593bf92018-11-21 18:03:21 +08002440 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
Cornelia Huck89062652014-10-07 16:39:47 +02002441}
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002442EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2443
2444dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2445{
2446 struct vring_virtqueue *vq = to_vvq(_vq);
2447
2448 BUG_ON(!vq->we_own_ring);
2449
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002450 if (vq->packed_ring)
2451 return vq->packed.device_event_dma_addr;
2452
Tiwei Bied79dca72018-11-21 18:03:25 +08002453 return vq->split.queue_dma_addr +
Tiwei Biee593bf92018-11-21 18:03:21 +08002454 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002455}
2456EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
2457
Tiwei Bie1ce9e602018-11-21 18:03:27 +08002458/* Only available for split ring */
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002459const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2460{
Tiwei Biee593bf92018-11-21 18:03:21 +08002461 return &to_vvq(vq)->split.vring;
Andy Lutomirski2a2d1382016-02-02 21:46:37 -08002462}
2463EXPORT_SYMBOL_GPL(virtqueue_get_vring);
Cornelia Huck89062652014-10-07 16:39:47 +02002464
Rusty Russellc6fd4702008-02-04 23:50:05 -05002465MODULE_LICENSE("GPL");