/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;

		/* Per-descriptor state. */
		struct vring_desc_state_split *desc_state;

		/* DMA, allocation, and size information */
		size_t queue_size_in_bytes;
		dma_addr_t queue_dma_addr;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		/* Don't silently shrink the queue if the caller forbade it. */
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
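
/*
 * Example (editor's illustrative sketch, not part of the upstream file):
 * queueing a request with one device-readable and one device-writable
 * buffer.  "req", its fields and "vq" are hypothetical driver state.
 * The first sgs entry is read by the device, the second is written by it:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */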

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
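
/*
 * Example (editor's sketch, assuming a driver-owned "buf"/"len" pair):
 * sending one output buffer and kicking the device, the common pattern
 * in console- and block-style drivers.
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */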

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
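
/*
 * Example (editor's sketch): topping up a receive queue until the ring
 * is full; a non-zero return (typically -ENOSPC) means no descriptors
 * are left.  BUF_LEN and the kmalloc()-based buffers are assumptions,
 * not this file's API.
 *
 *	struct scatterlist sg;
 *
 *	for (;;) {
 *		void *buf = kmalloc(BUF_LEN, GFP_ATOMIC);
 *
 *		if (!buf)
 *			break;
 *		sg_init_one(&sg, buf, BUF_LEN);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC)) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */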

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	return virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
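
/*
 * Example (editor's sketch): the usual reason to split the kick is to
 * do the serialized half under a driver lock and the possibly slow
 * notification outside it.  "priv->vq_lock" is hypothetical driver
 * state.
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->vq_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->vq_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 */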

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token, as given to virtqueue_add_inbuf_ctx().
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
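
/*
 * Example (editor's sketch): draining all completed buffers, e.g. in a
 * transmit-reclaim path.  complete_request() is a hypothetical stand-in
 * for whatever the driver does with a finished token.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(buf, len);
 */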

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
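
/*
 * Example (editor's sketch): briefly busy-polling with the prepare/poll
 * pair before falling back to interrupts.  process_used_buffers() and
 * "timeout" are hypothetical.
 *
 *	unsigned int last = virtqueue_enable_cb_prepare(vq);
 *
 *	while (!virtqueue_poll(vq, last) && time_before(jiffies, timeout))
 *		cpu_relax();
 *	if (virtqueue_poll(vq, last)) {
 *		virtqueue_disable_cb(vq);
 *		process_used_buffers(vq);
 *	}
 */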

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
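
/*
 * Example (editor's sketch): the canonical race-free consumption loop;
 * when virtqueue_enable_cb() reports pending buffers, go around again
 * instead of sleeping.  process_all() stands in for a
 * virtqueue_get_buf() loop like the one sketched above.
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		process_all(vq);
 *		if (virtqueue_enable_cb(vq))
 *			break;
 *	}
 */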

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
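
/*
 * Example (editor's sketch): vring_interrupt() ends up invoking the
 * per-virtqueue callback the driver registered through find_vqs(); a
 * typical callback just defers the real work.  "struct my_dev" and its
 * work item are hypothetical.
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		struct my_dev *d = vq->vdev->priv;
 *
 *		schedule_work(&d->reclaim_work);
 *	}
 */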

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.queue_dma_addr = 0;
	vq->split.queue_size_in_bytes = 0;

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state) {
		kfree(vq);
		return NULL;
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
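
/*
 * Example (editor's sketch): how a transport might create a split
 * virtqueue; the alignment, flags and my_notify() are illustrative
 * (the real callers are the virtio PCI and MMIO transports).  The
 * three bools are weak_barriers, may_reduce_num and context:
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */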

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev,
				 vq->split.queue_size_in_bytes,
				 vq->split.vring.desc,
				 vq->split.queue_dma_addr);
		kfree(vq->split.desc_state);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
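
/*
 * Example (editor's sketch): a transport typically calls this from its
 * finalize_features hook so the ring code can drop transport feature
 * bits it does not understand; my_finalize_features() is hypothetical.
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */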

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");